code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "cvt" def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Dict = num_channels lowercase__: str = patch_sizes lowercase__: Optional[Any] = patch_stride lowercase__: List[str] = patch_padding lowercase__: Optional[Any] = embed_dim lowercase__: Optional[int] = num_heads lowercase__: Any = depth lowercase__: str = mlp_ratio lowercase__: Any = attention_drop_rate lowercase__: Any = drop_rate lowercase__: Optional[Any] = drop_path_rate lowercase__: Dict = qkv_bias lowercase__: Dict = cls_token lowercase__: Any = qkv_projection_method lowercase__: List[str] = kernel_qkv lowercase__: Union[str, Any] = padding_kv lowercase__: Optional[int] = stride_kv lowercase__: int = padding_q lowercase__: Dict = stride_q lowercase__: Any = initializer_range lowercase__: Union[str, Any] = layer_norm_eps
2
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = "unispeech-sat" def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowercase__: Union[str, Any] = hidden_size lowercase__: Union[str, Any] = feat_extract_norm lowercase__: Any = feat_extract_activation lowercase__: List[Any] = list(_UpperCAmelCase ) lowercase__: Optional[int] = list(_UpperCAmelCase ) lowercase__: int = list(_UpperCAmelCase ) lowercase__: Any = conv_bias lowercase__: List[str] = num_conv_pos_embeddings lowercase__: List[str] = num_conv_pos_embedding_groups lowercase__: int = len(self.conv_dim ) lowercase__: Dict = num_hidden_layers lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: Optional[Any] = num_attention_heads lowercase__: Union[str, Any] = hidden_dropout lowercase__: List[Any] = attention_dropout lowercase__: str = activation_dropout lowercase__: Optional[Any] = feat_proj_dropout lowercase__: Optional[int] = final_dropout lowercase__: Any = layerdrop lowercase__: int = layer_norm_eps lowercase__: Any = initializer_range lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[Any] = num_clusters lowercase__: Dict = do_stable_layer_norm lowercase__: List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__: Dict = apply_spec_augment lowercase__: Union[str, Any] = mask_time_prob lowercase__: List[str] = mask_time_length lowercase__: Union[str, Any] = mask_time_min_masks lowercase__: str = mask_feature_prob lowercase__: Dict = mask_feature_length lowercase__: List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__: Tuple = num_codevectors_per_group lowercase__: Optional[Any] = num_codevector_groups lowercase__: int = contrastive_logits_temperature lowercase__: Any = feat_quantizer_dropout lowercase__: int = num_negatives lowercase__: Optional[Any] = codevector_dim lowercase__: int = proj_codevector_dim lowercase__: str = diversity_loss_weight # ctc loss lowercase__: int = ctc_loss_reduction lowercase__: Union[str, Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__: Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = list(_UpperCAmelCase ) lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
2
1
"""simple docstring""" import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available __A = logging.getLogger(__name__) @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str _UpperCAmelCase :List[str] _UpperCAmelCase :Optional[List[str]] @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :List[int] _UpperCAmelCase :List[int] _UpperCAmelCase :Optional[List[int]] = None _UpperCAmelCase :Optional[List[int]] = None class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "train" _UpperCAmelCase :Optional[int] = "dev" _UpperCAmelCase :List[str] = "test" class UpperCAmelCase : """simple docstring""" @staticmethod def _snake_case ( _UpperCAmelCase , _UpperCAmelCase ): raise NotImplementedError @staticmethod def _snake_case ( _UpperCAmelCase ): raise NotImplementedError @staticmethod def _snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase=1 , _UpperCAmelCase="[SEP]" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=-100 , _UpperCAmelCase=0 , _UpperCAmelCase=True , ): lowercase__: int = {label: i for i, label in enumerate(_UpperCAmelCase )} lowercase__: int = [] for ex_index, example in enumerate(_UpperCAmelCase ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' , _UpperCAmelCase , len(_UpperCAmelCase ) ) lowercase__: Optional[int] = [] lowercase__: Optional[Any] = [] for word, label in zip(example.words , example.labels ): lowercase__: Tuple = tokenizer.tokenize(_UpperCAmelCase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_UpperCAmelCase ) > 0: tokens.extend(_UpperCAmelCase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_UpperCAmelCase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. lowercase__: List[Any] = tokenizer.num_special_tokens_to_add() if len(_UpperCAmelCase ) > max_seq_length - special_tokens_count: lowercase__: Optional[int] = tokens[: (max_seq_length - special_tokens_count)] lowercase__: Tuple = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] lowercase__: int = [sequence_a_segment_id] * len(_UpperCAmelCase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: lowercase__: Any = [cls_token] + tokens lowercase__: Dict = [pad_token_label_id] + label_ids lowercase__: List[Any] = [cls_token_segment_id] + segment_ids lowercase__: Dict = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. lowercase__: Dict = [1 if mask_padding_with_zero else 0] * len(_UpperCAmelCase ) # Zero-pad up to the sequence length. lowercase__: Any = max_seq_length - len(_UpperCAmelCase ) if pad_on_left: lowercase__: Union[str, Any] = ([pad_token] * padding_length) + input_ids lowercase__: int = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask lowercase__: Dict = ([pad_token_segment_id] * padding_length) + segment_ids lowercase__: Optional[Any] = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length if ex_index < 5: logger.info('''*** Example ***''' ) logger.info('''guid: %s''' , example.guid ) logger.info('''tokens: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in tokens] ) ) logger.info('''input_ids: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in input_ids] ) ) logger.info('''input_mask: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in input_mask] ) ) logger.info('''segment_ids: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in segment_ids] ) ) logger.info('''label_ids: %s''' , ''' '''.join([str(_UpperCAmelCase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: lowercase__: str = None features.append( InputFeatures( input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , label_ids=_UpperCAmelCase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[InputFeatures] _UpperCAmelCase :int = nn.CrossEntropyLoss().ignore_index def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = Split.train , ): # Load data features from cache or dataset file lowercase__: List[Any] = os.path.join( _UpperCAmelCase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowercase__: str = cached_features_file + '''.lock''' with FileLock(_UpperCAmelCase ): if os.path.exists(_UpperCAmelCase ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) lowercase__: Optional[Any] = torch.load(_UpperCAmelCase ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) lowercase__: int = token_classification_task.read_examples_from_file(_UpperCAmelCase , _UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers lowercase__: Dict = token_classification_task.convert_examples_to_features( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F"""Saving features into cached file {cached_features_file}""" ) torch.save(self.features , _UpperCAmelCase ) def __len__( self ): return len(self.features ) def __getitem__( self , _UpperCAmelCase ): return self.features[i] if is_tf_available(): import tensorflow as tf class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :List[InputFeatures] _UpperCAmelCase :int = -100 def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = Split.train , ): lowercase__: List[str] = token_classification_task.read_examples_from_file(_UpperCAmelCase , _UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers lowercase__: Optional[Any] = token_classification_task.convert_examples_to_features( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: lowercase__: Optional[int] = tf.data.Dataset.from_generator( _UpperCAmelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , ( {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: lowercase__: Union[str, Any] = tf.data.Dataset.from_generator( _UpperCAmelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , ( { '''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] ), '''token_type_ids''': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def _snake_case ( self ): lowercase__: List[str] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self ): return len(self.features ) def __getitem__( self , _UpperCAmelCase ): return self.features[i]
2
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) __A = parser.parse_args() __A = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: return int((input_a, input_a).count(0 ) == 0 ) def SCREAMING_SNAKE_CASE__ ( ) -> None: assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
2
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
2
1
"""simple docstring""" import operator as op def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]: lowercase__: str = [] lowercase__: List[str] = lambda __UpperCAmelCase , __UpperCAmelCase : int(x / y ) # noqa: E731 integer division operation lowercase__: Optional[int] = { '''^''': op.pow, '''*''': op.mul, '''/''': div, '''+''': op.add, '''-''': op.sub, } # operators & their respective operation # print table header print('''Symbol'''.center(8 ) , '''Action'''.center(1_2 ) , '''Stack''' , sep=''' | ''' ) print('''-''' * (3_0 + len(__UpperCAmelCase )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__UpperCAmelCase ) # append x to stack # output in tabular format print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(1_2 ) , ''','''.join(__UpperCAmelCase ) , sep=''' | ''' ) else: lowercase__: List[Any] = stack.pop() # pop stack # output in tabular format print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(1_2 ) , ''','''.join(__UpperCAmelCase ) , sep=''' | ''' ) lowercase__: str = stack.pop() # pop stack # output in tabular format print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(1_2 ) , ''','''.join(__UpperCAmelCase ) , sep=''' | ''' ) stack.append( str(opr[x](int(__UpperCAmelCase ) , int(__UpperCAmelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(1_2 ) , ''','''.join(__UpperCAmelCase ) , sep=''' | ''' , ) return int(stack[0] ) if __name__ == "__main__": __A = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
2
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A = logging.get_logger(__name__) __A = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = "codegen" _UpperCAmelCase :Optional[int] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: int = vocab_size lowercase__: str = n_ctx lowercase__: List[Any] = n_positions lowercase__: Union[str, Any] = n_embd lowercase__: Optional[Any] = n_layer lowercase__: str = n_head lowercase__: List[Any] = n_inner lowercase__: Union[str, Any] = rotary_dim lowercase__: Optional[Any] = activation_function lowercase__: Union[str, Any] = resid_pdrop lowercase__: Optional[int] = embd_pdrop lowercase__: Optional[Any] = attn_pdrop lowercase__: Optional[int] = layer_norm_epsilon lowercase__: List[Any] = initializer_range lowercase__: Tuple = use_cache lowercase__: Any = bos_token_id lowercase__: Any = eos_token_id super().__init__( bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' ) lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Any = seqlen + 2 lowercase__: List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__: Optional[Any] = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Optional[Any] = common_inputs['''attention_mask'''] if self.use_past: lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype lowercase__: List[Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser __A = logging.getLogger(__name__) torch.set_grad_enabled(False) __A = "cuda" if torch.cuda.is_available() else "cpu" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=" " ) -> List[str]: lowercase__: str = text.split(__UpperCAmelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> dict: lowercase__, lowercase__: Any = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__UpperCAmelCase ): titles.append(title if title is not None else '''''' ) texts.append(__UpperCAmelCase ) return {"title": titles, "text": texts} def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict: lowercase__: Union[str, Any] = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] lowercase__: Optional[Any] = ctx_encoder(input_ids.to(device=__UpperCAmelCase ) , return_dict=__UpperCAmelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase__: Tuple = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase__: int = dataset.map(__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=processing_args.num_proc ) # And compute the embeddings lowercase__: Any = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__UpperCAmelCase ) lowercase__: Tuple = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowercase__: Dict = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space lowercase__: Optional[Any] = dataset.map( partial(__UpperCAmelCase , ctx_encoder=__UpperCAmelCase , ctx_tokenizer=__UpperCAmelCase ) , batched=__UpperCAmelCase , batch_size=processing_args.batch_size , features=__UpperCAmelCase , ) # And finally save your dataset lowercase__: Any = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__UpperCAmelCase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase__: int = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__UpperCAmelCase ) # And save the index lowercase__: str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__UpperCAmelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = field( default=str(Path(_UpperCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,) _UpperCAmelCase :str = field( default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,) _UpperCAmelCase :str = field( default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } ,) _UpperCAmelCase :Optional[str] = field( default=str(Path(_UpperCAmelCase ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,) @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :Optional[int] = field( default=_UpperCAmelCase ,metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } ,) _UpperCAmelCase :int = field( default=16 ,metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } ,) @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :int = field( default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,) _UpperCAmelCase :int = field( default=128 ,metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } ,) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) __A = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) __A ,__A ,__A = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: __A = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
2
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = field( metadata={"help": "The output directory where the model will be written."} ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } ,) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: lowercase__: Dict = HfArgumentParser((ModelArguments,) ) ((lowercase__), ): List[str] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowercase__: Tuple = True lowercase__: int = True lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowercase__: int = decoder_config.decoder_start_token_id lowercase__: Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: lowercase__: Tuple = decoder_config.bos_token_id if pad_token_id is None: lowercase__: Optional[int] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowercase__: Optional[Any] = decoder_config.eos_token_id lowercase__: Tuple = decoder_start_token_id lowercase__: Dict = pad_token_id lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
2
1
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=99 , _UpperCAmelCase=13 , _UpperCAmelCase=16 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=30 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ): lowercase__: List[Any] = parent lowercase__: Union[str, Any] = batch_size lowercase__: Any = decoder_seq_length # For common tests lowercase__: Tuple = self.decoder_seq_length lowercase__: Dict = is_training lowercase__: List[str] = use_attention_mask lowercase__: Optional[Any] = use_labels lowercase__: Dict = vocab_size lowercase__: Tuple = d_model lowercase__: Tuple = d_model lowercase__: List[str] = decoder_layers lowercase__: str = decoder_layers lowercase__: List[str] = decoder_ffn_dim lowercase__: Tuple = decoder_attention_heads lowercase__: str = decoder_attention_heads lowercase__: Optional[Any] = eos_token_id lowercase__: int = bos_token_id lowercase__: Optional[int] = pad_token_id lowercase__: Optional[int] = decoder_start_token_id lowercase__: int = use_cache lowercase__: Dict = max_position_embeddings lowercase__: Tuple = None lowercase__: Any = decoder_seq_length lowercase__: int = 2 lowercase__: Dict = 1 def _snake_case ( self ): lowercase__: Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__: Union[str, Any] = None if self.use_attention_mask: lowercase__: str = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) lowercase__: Any = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__: List[str] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): lowercase__: Optional[int] = True lowercase__: List[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval() lowercase__: List[Any] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowercase__: List[str] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) lowercase__: Union[str, Any] = model(_UpperCAmelCase ) lowercase__: Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) ) self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 ) lowercase__: List[Any] = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowercase__: Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and lowercase__: Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__: int = model(_UpperCAmelCase )['''last_hidden_state'''] lowercase__: List[str] = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )['''last_hidden_state'''] # select random slice lowercase__: Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__: List[str] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowercase__: List[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) def _snake_case ( self ): lowercase__: Optional[int] = self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__, lowercase__: int = config_and_inputs lowercase__: Tuple = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () _UpperCAmelCase :Optional[Any] = (TrOCRForCausalLM,) if is_torch_available() else () _UpperCAmelCase :int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} _UpperCAmelCase :int = True _UpperCAmelCase :Any = False def _snake_case ( self ): lowercase__: List[str] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase ) lowercase__: Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase ) def _snake_case ( self ): pass def _snake_case ( self ): pass def _snake_case ( self ): pass def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase ) def _snake_case ( self ): return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def _snake_case ( self ): pass
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "ctrl" _UpperCAmelCase :int = ["past_key_values"] _UpperCAmelCase :Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ): lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[int] = n_positions lowercase__: Optional[int] = n_embd lowercase__: Any = n_layer lowercase__: Any = n_head lowercase__: int = dff lowercase__: Dict = resid_pdrop lowercase__: Any = embd_pdrop lowercase__: Any = layer_norm_epsilon lowercase__: Optional[int] = initializer_range lowercase__: Dict = use_cache super().__init__(**_UpperCAmelCase )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) lowercase__: Optional[int] = (boundary[1] - boundary[0]) / steps lowercase__: Any = boundary[0] lowercase__: str = boundary[1] lowercase__: List[str] = make_points(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase__: Optional[Any] = 0.0 y += (h / 2.0) * f(__UpperCAmelCase ) for i in x_i: # print(i) y += h * f(__UpperCAmelCase ) y += (h / 2.0) * f(__UpperCAmelCase ) return y def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: lowercase__: str = a + h while x < (b - h): yield x lowercase__: Tuple = x + h def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]: # enter your function here lowercase__: List[Any] = (x - 0) * (x - 0) return y def SCREAMING_SNAKE_CASE__ ( ) -> int: lowercase__: List[Any] = 0.0 # Lower bound of integration lowercase__: List[Any] = 1.0 # Upper bound of integration lowercase__: int = 1_0.0 # define number of steps or resolution lowercase__: List[str] = [a, b] # define boundary of integration lowercase__: Dict = method_a(__UpperCAmelCase , __UpperCAmelCase ) print(F"""y = {y}""" ) if __name__ == "__main__": main()
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int: lowercase__: str = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
2
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = "unispeech-sat" def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowercase__: Union[str, Any] = hidden_size lowercase__: Union[str, Any] = feat_extract_norm lowercase__: Any = feat_extract_activation lowercase__: List[Any] = list(_UpperCAmelCase ) lowercase__: Optional[int] = list(_UpperCAmelCase ) lowercase__: int = list(_UpperCAmelCase ) lowercase__: Any = conv_bias lowercase__: List[str] = num_conv_pos_embeddings lowercase__: List[str] = num_conv_pos_embedding_groups lowercase__: int = len(self.conv_dim ) lowercase__: Dict = num_hidden_layers lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: Optional[Any] = num_attention_heads lowercase__: Union[str, Any] = hidden_dropout lowercase__: List[Any] = attention_dropout lowercase__: str = activation_dropout lowercase__: Optional[Any] = feat_proj_dropout lowercase__: Optional[int] = final_dropout lowercase__: Any = layerdrop lowercase__: int = layer_norm_eps lowercase__: Any = initializer_range lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[Any] = num_clusters lowercase__: Dict = do_stable_layer_norm lowercase__: List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__: Dict = apply_spec_augment lowercase__: Union[str, Any] = mask_time_prob lowercase__: List[str] = mask_time_length lowercase__: Union[str, Any] = mask_time_min_masks lowercase__: str = mask_feature_prob lowercase__: Dict = mask_feature_length lowercase__: List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__: Tuple = num_codevectors_per_group lowercase__: Optional[Any] = num_codevector_groups lowercase__: int = contrastive_logits_temperature lowercase__: Any = feat_quantizer_dropout lowercase__: int = num_negatives lowercase__: Optional[Any] = codevector_dim lowercase__: int = proj_codevector_dim lowercase__: str = diversity_loss_weight # ctc loss lowercase__: int = ctc_loss_reduction lowercase__: Union[str, Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__: Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = list(_UpperCAmelCase ) lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
2
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ): lowercase__: int = bp_numa lowercase__: Union[str, Any] = bp_numa lowercase__: List[str] = bp_numa lowercase__: str = conva_get[:2] lowercase__: Union[str, Any] = conva_get[2] lowercase__: Any = size_pa lowercase__: Optional[Any] = rate_w lowercase__: Tuple = rate_t lowercase__: List[str] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , _UpperCAmelCase ): # save model dict with pickle lowercase__: int = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_UpperCAmelCase , '''wb''' ) as f: pickle.dump(_UpperCAmelCase , _UpperCAmelCase ) print(F"""Model saved: {save_path}""" ) @classmethod def _snake_case ( cls , _UpperCAmelCase ): # read saved model with open(_UpperCAmelCase , '''rb''' ) as f: lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301 lowercase__: Tuple = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) lowercase__: Any = model_dic.get('''size_pooling1''' ) lowercase__: int = model_dic.get('''num_bp1''' ) lowercase__: Optional[int] = model_dic.get('''num_bp2''' ) lowercase__: str = model_dic.get('''num_bp3''' ) lowercase__: Any = model_dic.get('''rate_weight''' ) lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' ) # create model instance lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # modify model parameter lowercase__: Dict = model_dic.get('''w_conv1''' ) lowercase__: Dict = model_dic.get('''wkj''' ) lowercase__: str = model_dic.get('''vji''' ) lowercase__: List[Any] = model_dic.get('''thre_conv1''' ) lowercase__: Optional[int] = model_dic.get('''thre_bp2''' ) lowercase__: Tuple = model_dic.get('''thre_bp3''' ) return conv_ins def _snake_case ( self , _UpperCAmelCase ): return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , _UpperCAmelCase ): return round(_UpperCAmelCase , 3 ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # convolution process lowercase__: Any = convs[0] lowercase__: Tuple = convs[1] lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0] # get the data slice of original image data, data_focus lowercase__: List[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): lowercase__: Tuple = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_UpperCAmelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__: Optional[int] = [] lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_UpperCAmelCase ): lowercase__: str = [] for i_focus in range(len(_UpperCAmelCase ) ): lowercase__: Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape( _UpperCAmelCase , _UpperCAmelCase ) data_featuremap.append(_UpperCAmelCase ) # expanding the data slice to One dimenssion lowercase__: Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) ) lowercase__: Any = np.asarray(_UpperCAmelCase ) return focus_list, data_featuremap def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ): # pooling process lowercase__: List[Any] = len(featuremaps[0] ) lowercase__: Any = int(size_map / size_pooling ) lowercase__: List[Any] = [] for i_map in range(len(_UpperCAmelCase ) ): lowercase__: Any = featuremaps[i_map] lowercase__: Tuple = [] for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_UpperCAmelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ) featuremap_pooled.append(_UpperCAmelCase ) return featuremap_pooled def _snake_case ( self , _UpperCAmelCase ): # expanding three dimension data to one dimension list lowercase__: Optional[Any] = [] for i in range(len(_UpperCAmelCase ) ): lowercase__: Any = np.shape(data[i] ) lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) lowercase__: List[str] = data_listed.getA().tolist()[0] data_expanded.extend(_UpperCAmelCase ) lowercase__: List[str] = np.asarray(_UpperCAmelCase ) return data_expanded def _snake_case ( self , _UpperCAmelCase ): # expanding matrix to one dimension list lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase ) lowercase__: List[str] = np.shape(_UpperCAmelCase ) lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = [] lowercase__: List[str] = 0 for i_map in range(_UpperCAmelCase ): lowercase__: Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = pd_pool[ i_pool ] lowercase__: List[Any] = i_pool + 1 lowercase__: str = np.multiply( _UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_UpperCAmelCase ) return pd_all def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) ) print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) ) lowercase__: Tuple = 0 lowercase__: Tuple = [] lowercase__: Optional[int] = 10000 while rp < n_repeat and mse >= error_accuracy: lowercase__: Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(_UpperCAmelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__: List[Any] = np.asmatrix(datas_train[p] ) lowercase__: Optional[int] = np.asarray(datas_teach[p] ) lowercase__, lowercase__: List[str] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: int = np.shape(_UpperCAmelCase ) lowercase__: Optional[Any] = self._expand(_UpperCAmelCase ) lowercase__: Any = data_bp_input lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa lowercase__: str = self.sig(_UpperCAmelCase ) lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa lowercase__: Dict = self.sig(_UpperCAmelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__: str = np.multiply( (data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: str = np.multiply( np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji ) lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__: List[str] = pd_conva_pooled.T.getA().tolist() lowercase__: Optional[Any] = self._calculate_gradient_from_pool( _UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__: str = self._expand_mat(pd_conva_all[k_conv] ) lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__: List[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__: str = rp + 1 lowercase__: Optional[Any] = error_count / patterns all_mse.append(_UpperCAmelCase ) def draw_error(): lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_UpperCAmelCase , '''+-''' ) plt.plot(_UpperCAmelCase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_UpperCAmelCase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def _snake_case ( self , _UpperCAmelCase ): # model predict lowercase__: Union[str, Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) ) for p in range(len(_UpperCAmelCase ) ): lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] ) lowercase__, lowercase__: Any = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: str = self._expand(_UpperCAmelCase ) lowercase__: List[Any] = data_bp_input lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out] return np.asarray(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # return the data of image after convoluting process so we can check it out lowercase__: int = np.asmatrix(_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
2
1
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ): lowercase__: int = parent lowercase__: List[Any] = batch_size lowercase__: List[Any] = seq_length lowercase__: Optional[int] = is_training lowercase__: List[Any] = use_attention_mask lowercase__: List[Any] = use_token_type_ids lowercase__: Optional[Any] = use_labels lowercase__: Any = vocab_size lowercase__: Optional[int] = hidden_size lowercase__: int = num_hidden_layers lowercase__: Any = num_attention_heads lowercase__: Optional[Any] = intermediate_size lowercase__: int = hidden_act lowercase__: int = hidden_dropout_prob lowercase__: Optional[Any] = attention_probs_dropout_prob lowercase__: Optional[Any] = max_position_embeddings lowercase__: List[Any] = type_vocab_size lowercase__: List[Any] = type_sequence_label_size lowercase__: Any = initializer_range lowercase__: Optional[int] = num_choices def _snake_case ( self ): lowercase__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: Optional[int] = None if self.use_attention_mask: lowercase__: List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: Dict = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCAmelCase , ) return config, input_ids, attention_mask def _snake_case ( self ): lowercase__: Dict = self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__: Union[str, Any] = config_and_inputs lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def _snake_case ( self ): lowercase__: Optional[Any] = FlaxDistilBertModelTester(self ) @slow def _snake_case ( self ): for model_class_name in self.all_model_classes: lowercase__: List[str] = model_class_name.from_pretrained('''distilbert-base-uncased''' ) lowercase__: Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: List[Any] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) lowercase__: Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase__: Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] lowercase__: int = (1, 11, 768) self.assertEqual(output.shape , _UpperCAmelCase ) lowercase__: List[str] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
2
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = CTRLTokenizer _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Optional[int] = '''adapt react readapt apt''' return input_text, output_text def _snake_case ( self ): lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = tokens + [tokenizer.unk_token] lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
2
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger __A = "<<<<<<< This should probably be modified because it mentions: " __A = "=======\n>>>>>>>\n" __A = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] __A = [ # (pattern, replacement) # Order is important here for some replacements (R"tfds\.core", R"datasets"), (R"tf\.io\.gfile\.GFile", R"open"), (R"tf\.([\w\d]+)", R"datasets.Value('\1')"), (R"tfds\.features\.Text\(\)", R"datasets.Value('string')"), (R"tfds\.features\.Text\(", R"datasets.Value('string'),"), (R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("), (R"tfds\.features\.FeaturesDict\(", R"dict("), (R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (R"tfds\.", R"datasets."), (R"dl_manager\.manual_dir", R"self.config.data_dir"), (R"self\.builder_config", R"self.config"), ] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: return ConvertCommand(args.tfds_path , args.datasets_directory ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" @staticmethod def _snake_case ( _UpperCAmelCase ): lowercase__: int = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ): lowercase__: List[str] = get_logger('''datasets-cli/converting''' ) lowercase__: Optional[Any] = tfds_path lowercase__: Dict = datasets_directory def _snake_case ( self ): if os.path.isdir(self._tfds_path ): lowercase__: Optional[Any] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__: Optional[int] = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' ) lowercase__: int = os.path.abspath(self._datasets_directory ) self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) lowercase__: Tuple = [] lowercase__: Dict = [] lowercase__: Any = {} if os.path.isdir(self._tfds_path ): lowercase__: Dict = os.listdir(_UpperCAmelCase ) else: lowercase__: Dict = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"""Looking at file {f_name}""" ) lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: lowercase__: Tuple = f.readlines() lowercase__: Optional[Any] = [] lowercase__: Dict = False lowercase__: List[str] = False lowercase__: List[Any] = [] for line in lines: lowercase__: List[str] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__: Optional[int] = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here lowercase__: Dict = '''''' continue elif "from absl import logging" in out_line: lowercase__: Tuple = '''from datasets import logging\n''' elif "getLogger" in out_line: lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__: Any = True lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' ) out_lines.append(_UpperCAmelCase ) out_lines.append(_UpperCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) lowercase__: List[str] = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__: Optional[Any] = True out_lines.append(_UpperCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__: Dict = f_name.replace('''.py''' , '''''' ) lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) self._logger.info(F"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_UpperCAmelCase ) if needs_manual_update: with_manual_update.append(_UpperCAmelCase ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.writelines(_UpperCAmelCase ) self._logger.info(F"""Converted in {output_file}""" ) for utils_file in utils_files: try: lowercase__: str = os.path.basename(_UpperCAmelCase ) lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(_UpperCAmelCase , _UpperCAmelCase ) except KeyError: self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
2
1
"""simple docstring""" import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __A = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) __A = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) __A = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) __A = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) __A = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 1_4]), ("2H 5D 3C AS 5S", False, [1_4, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [1_4, 1_3, 1_2, 1_1, 1_0]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) __A = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) __A = ( ("JH AH TH KH QH", 2_3), ("JH 9H TH KH QH", 2_2), ("JC KH JS JD JH", 2_1), ("KH KC 3S 3H 3D", 2_0), ("8C 9C 5C 3C TC", 1_9), ("JS QS 9H TS KH", 1_8), ("7C 7S KH 2H 7H", 1_7), ("3C KH 5D 5S KH", 1_6), ("QH 8H KD JH 8S", 1_5), ("2D 6D 9D TH 7D", 1_4), ) def SCREAMING_SNAKE_CASE__ ( ) -> Any: lowercase__, lowercase__: Dict = randrange(len(__UpperCAmelCase ) ), randrange(len(__UpperCAmelCase ) ) lowercase__: Optional[int] = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__, lowercase__: int = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0_0 ) -> Dict: return (generate_random_hand() for _ in range(__UpperCAmelCase )) @pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: assert PokerHand(__UpperCAmelCase )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: assert PokerHand(__UpperCAmelCase )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: lowercase__: Tuple = PokerHand(__UpperCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: assert PokerHand(__UpperCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: assert PokerHand(__UpperCAmelCase )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected def SCREAMING_SNAKE_CASE__ ( ) -> Dict: lowercase__: Union[str, Any] = [PokerHand(__UpperCAmelCase ) for hand in SORTED_HANDS] lowercase__: Any = poker_hands.copy() shuffle(__UpperCAmelCase ) lowercase__: Union[str, Any] = chain(sorted(__UpperCAmelCase ) ) for index, hand in enumerate(__UpperCAmelCase ): assert hand == poker_hands[index] def SCREAMING_SNAKE_CASE__ ( ) -> Dict: # Test that five high straights are compared correctly. lowercase__: str = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=__UpperCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def SCREAMING_SNAKE_CASE__ ( ) -> Dict: # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. lowercase__: Any = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__: Union[str, Any] = True lowercase__: Tuple = [5, 4, 3, 2, 1_4] for _ in range(1_0 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def SCREAMING_SNAKE_CASE__ ( ) -> int: # Problem number 54 from Project Euler # Testing from poker_hands.txt file lowercase__: Tuple = 0 lowercase__: Any = os.path.abspath(os.path.dirname(__UpperCAmelCase ) ) lowercase__: List[str] = os.path.join(__UpperCAmelCase , '''poker_hands.txt''' ) with open(__UpperCAmelCase ) as file_hand: for line in file_hand: lowercase__: Union[str, Any] = line[:1_4].strip() lowercase__: Optional[int] = line[1_5:].strip() lowercase__, lowercase__: List[Any] = PokerHand(__UpperCAmelCase ), PokerHand(__UpperCAmelCase ) lowercase__: List[str] = player.compare_with(__UpperCAmelCase ) if output == "Win": answer += 1 assert answer == 3_7_6
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "cvt" def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Dict = num_channels lowercase__: str = patch_sizes lowercase__: Optional[Any] = patch_stride lowercase__: List[str] = patch_padding lowercase__: Optional[Any] = embed_dim lowercase__: Optional[int] = num_heads lowercase__: Any = depth lowercase__: str = mlp_ratio lowercase__: Any = attention_drop_rate lowercase__: Any = drop_rate lowercase__: Optional[Any] = drop_path_rate lowercase__: Dict = qkv_bias lowercase__: Dict = cls_token lowercase__: Any = qkv_projection_method lowercase__: List[str] = kernel_qkv lowercase__: Union[str, Any] = padding_kv lowercase__: Optional[int] = stride_kv lowercase__: int = padding_q lowercase__: Dict = stride_q lowercase__: Any = initializer_range lowercase__: Union[str, Any] = layer_norm_eps
2
1
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float: if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> float: if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> float: if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( __UpperCAmelCase , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 ) if __name__ == "__main__": import doctest doctest.testmod()
2
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = "rag" _UpperCAmelCase :List[Any] = True def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' ) lowercase__: Any = question_encoder_config.pop('''model_type''' ) lowercase__: Tuple = kwargs.pop('''generator''' ) lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: str = reduce_loss lowercase__: str = label_smoothing lowercase__: Dict = exclude_bos_score lowercase__: Any = do_marginalize lowercase__: Optional[int] = title_sep lowercase__: Any = doc_sep lowercase__: Any = n_docs lowercase__: List[Any] = max_combined_length lowercase__: int = dataset lowercase__: int = dataset_split lowercase__: str = index_name lowercase__: Dict = retrieval_vector_size lowercase__: Dict = retrieval_batch_size lowercase__: List[str] = passages_path lowercase__: str = index_path lowercase__: Optional[Any] = use_dummy_dataset lowercase__: str = output_retrieved lowercase__: List[str] = do_deduplication lowercase__: List[Any] = use_cache if self.forced_eos_token_id is None: lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase ) @classmethod def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = copy.deepcopy(self.__dict__ ) lowercase__: str = self.question_encoder.to_dict() lowercase__: str = self.generator.to_dict() lowercase__: str = self.__class__.model_type return output
2
1
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ): lowercase__: int = bp_numa lowercase__: Union[str, Any] = bp_numa lowercase__: List[str] = bp_numa lowercase__: str = conva_get[:2] lowercase__: Union[str, Any] = conva_get[2] lowercase__: Any = size_pa lowercase__: Optional[Any] = rate_w lowercase__: Tuple = rate_t lowercase__: List[str] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , _UpperCAmelCase ): # save model dict with pickle lowercase__: int = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_UpperCAmelCase , '''wb''' ) as f: pickle.dump(_UpperCAmelCase , _UpperCAmelCase ) print(F"""Model saved: {save_path}""" ) @classmethod def _snake_case ( cls , _UpperCAmelCase ): # read saved model with open(_UpperCAmelCase , '''rb''' ) as f: lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301 lowercase__: Tuple = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) lowercase__: Any = model_dic.get('''size_pooling1''' ) lowercase__: int = model_dic.get('''num_bp1''' ) lowercase__: Optional[int] = model_dic.get('''num_bp2''' ) lowercase__: str = model_dic.get('''num_bp3''' ) lowercase__: Any = model_dic.get('''rate_weight''' ) lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' ) # create model instance lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # modify model parameter lowercase__: Dict = model_dic.get('''w_conv1''' ) lowercase__: Dict = model_dic.get('''wkj''' ) lowercase__: str = model_dic.get('''vji''' ) lowercase__: List[Any] = model_dic.get('''thre_conv1''' ) lowercase__: Optional[int] = model_dic.get('''thre_bp2''' ) lowercase__: Tuple = model_dic.get('''thre_bp3''' ) return conv_ins def _snake_case ( self , _UpperCAmelCase ): return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , _UpperCAmelCase ): return round(_UpperCAmelCase , 3 ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # convolution process lowercase__: Any = convs[0] lowercase__: Tuple = convs[1] lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0] # get the data slice of original image data, data_focus lowercase__: List[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): lowercase__: Tuple = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_UpperCAmelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__: Optional[int] = [] lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_UpperCAmelCase ): lowercase__: str = [] for i_focus in range(len(_UpperCAmelCase ) ): lowercase__: Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape( _UpperCAmelCase , _UpperCAmelCase ) data_featuremap.append(_UpperCAmelCase ) # expanding the data slice to One dimenssion lowercase__: Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) ) lowercase__: Any = np.asarray(_UpperCAmelCase ) return focus_list, data_featuremap def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ): # pooling process lowercase__: List[Any] = len(featuremaps[0] ) lowercase__: Any = int(size_map / size_pooling ) lowercase__: List[Any] = [] for i_map in range(len(_UpperCAmelCase ) ): lowercase__: Any = featuremaps[i_map] lowercase__: Tuple = [] for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_UpperCAmelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ) featuremap_pooled.append(_UpperCAmelCase ) return featuremap_pooled def _snake_case ( self , _UpperCAmelCase ): # expanding three dimension data to one dimension list lowercase__: Optional[Any] = [] for i in range(len(_UpperCAmelCase ) ): lowercase__: Any = np.shape(data[i] ) lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) lowercase__: List[str] = data_listed.getA().tolist()[0] data_expanded.extend(_UpperCAmelCase ) lowercase__: List[str] = np.asarray(_UpperCAmelCase ) return data_expanded def _snake_case ( self , _UpperCAmelCase ): # expanding matrix to one dimension list lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase ) lowercase__: List[str] = np.shape(_UpperCAmelCase ) lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = [] lowercase__: List[str] = 0 for i_map in range(_UpperCAmelCase ): lowercase__: Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = pd_pool[ i_pool ] lowercase__: List[Any] = i_pool + 1 lowercase__: str = np.multiply( _UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_UpperCAmelCase ) return pd_all def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) ) print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) ) lowercase__: Tuple = 0 lowercase__: Tuple = [] lowercase__: Optional[int] = 10000 while rp < n_repeat and mse >= error_accuracy: lowercase__: Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(_UpperCAmelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__: List[Any] = np.asmatrix(datas_train[p] ) lowercase__: Optional[int] = np.asarray(datas_teach[p] ) lowercase__, lowercase__: List[str] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: int = np.shape(_UpperCAmelCase ) lowercase__: Optional[Any] = self._expand(_UpperCAmelCase ) lowercase__: Any = data_bp_input lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa lowercase__: str = self.sig(_UpperCAmelCase ) lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa lowercase__: Dict = self.sig(_UpperCAmelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__: str = np.multiply( (data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: str = np.multiply( np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji ) lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__: List[str] = pd_conva_pooled.T.getA().tolist() lowercase__: Optional[Any] = self._calculate_gradient_from_pool( _UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__: str = self._expand_mat(pd_conva_all[k_conv] ) lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__: List[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__: str = rp + 1 lowercase__: Optional[Any] = error_count / patterns all_mse.append(_UpperCAmelCase ) def draw_error(): lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_UpperCAmelCase , '''+-''' ) plt.plot(_UpperCAmelCase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_UpperCAmelCase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def _snake_case ( self , _UpperCAmelCase ): # model predict lowercase__: Union[str, Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) ) for p in range(len(_UpperCAmelCase ) ): lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] ) lowercase__, lowercase__: Any = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: str = self._expand(_UpperCAmelCase ) lowercase__: List[Any] = data_bp_input lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out] return np.asarray(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # return the data of image after convoluting process so we can check it out lowercase__: int = np.asmatrix(_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
2
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) __A = "hf-internal-testing/tiny-random-bert" __A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") __A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: Dict = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(os.path.isfile(_UpperCAmelCase ) ) # File is cached at the same place the second time. lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # Using a specific revision to test the full commit hash. lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase ) with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: int = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) ) lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: Union[str, Any] = mock.Mock() lowercase__: str = 500 lowercase__: Union[str, Any] = {} lowercase__: List[str] = HTTPError lowercase__: int = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head: lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) # This check we did call the fake head request mock_head.assert_called() def _snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) def _snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCAmelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' ) lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
2
1
"""simple docstring""" import math from datetime import datetime, timedelta def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> datetime: lowercase__: List[str] = year % 1_9 lowercase__: Dict = year % 4 lowercase__: int = year % 7 lowercase__: Any = math.floor(year / 1_0_0 ) lowercase__: Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 ) lowercase__: Dict = leap_day_inhibits / 4 lowercase__: str = ( 1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 3_0 lowercase__: List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 lowercase__: Any = (1_9 * metonic_cycle + secular_moon_shift) % 3_0 # PHM -> Paschal Full Moon lowercase__: str = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 2_9 and days_from_phm_to_sunday == 6: return datetime(__UpperCAmelCase , 4 , 1_9 ) elif days_to_add == 2_8 and days_from_phm_to_sunday == 6: return datetime(__UpperCAmelCase , 4 , 1_8 ) else: return datetime(__UpperCAmelCase , 3 , 2_2 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3): __A = "will be" if year > datetime.now().year else "was" print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
2
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "beit" def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Union[str, Any] = vocab_size lowercase__: List[Any] = hidden_size lowercase__: Optional[int] = num_hidden_layers lowercase__: Optional[int] = num_attention_heads lowercase__: int = intermediate_size lowercase__: List[str] = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: List[str] = initializer_range lowercase__: Optional[int] = layer_norm_eps lowercase__: int = image_size lowercase__: Tuple = patch_size lowercase__: int = num_channels lowercase__: Optional[Any] = use_mask_token lowercase__: List[Any] = use_absolute_position_embeddings lowercase__: Optional[int] = use_relative_position_bias lowercase__: Optional[int] = use_shared_relative_position_bias lowercase__: Optional[Any] = layer_scale_init_value lowercase__: Union[str, Any] = drop_path_rate lowercase__: Tuple = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__: Tuple = out_indices lowercase__: Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__: List[str] = use_auxiliary_head lowercase__: Optional[Any] = auxiliary_loss_weight lowercase__: str = auxiliary_channels lowercase__: List[str] = auxiliary_num_convs lowercase__: Tuple = auxiliary_concat_input lowercase__: Dict = semantic_loss_ignore_index class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _snake_case ( self ): return 1e-4
2
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __A = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: lowercase__: int = '''''' for word_or_phrase in separated: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(__UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
2
1
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __A = "true" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=8_2 , __UpperCAmelCase=1_6 ) -> Any: set_seed(4_2 ) lowercase__: List[str] = RegressionModel() lowercase__: Dict = deepcopy(__UpperCAmelCase ) lowercase__: str = RegressionDataset(length=__UpperCAmelCase ) lowercase__: str = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase ) model.to(accelerator.device ) lowercase__, lowercase__: int = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase ) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> Dict: lowercase__: List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) lowercase__: Optional[int] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(__UpperCAmelCase ): lowercase__: Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase ) return outputs with accelerator.main_process_first(): lowercase__: Optional[int] = dataset.map( __UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) lowercase__: Any = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__UpperCAmelCase ): if use_longest: return tokenizer.pad(__UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' ) return tokenizer.pad(__UpperCAmelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return DataLoader(__UpperCAmelCase , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1_6 ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: lowercase__: Tuple = Accelerator(dispatch_batches=__UpperCAmelCase , split_batches=__UpperCAmelCase ) lowercase__: List[str] = get_dataloader(__UpperCAmelCase , not dispatch_batches ) lowercase__: Tuple = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__UpperCAmelCase ) lowercase__, lowercase__: Union[str, Any] = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: lowercase__: Optional[Any] = [] for batch in dataloader: lowercase__, lowercase__: List[str] = batch.values() with torch.no_grad(): lowercase__: List[Any] = model(__UpperCAmelCase ) lowercase__, lowercase__: List[Any] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) lowercase__, lowercase__: Tuple = [], [] for logit, targ in logits_and_targets: logits.append(__UpperCAmelCase ) targs.append(__UpperCAmelCase ) lowercase__, lowercase__: Dict = torch.cat(__UpperCAmelCase ), torch.cat(__UpperCAmelCase ) return logits, targs def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=8_2 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=1_6 ) -> Optional[Any]: lowercase__, lowercase__, lowercase__: int = get_basic_setup(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase__, lowercase__: List[str] = generate_predictions(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) assert ( len(__UpperCAmelCase ) == num_samples ), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCAmelCase )}""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = False , __UpperCAmelCase = False ) -> List[Any]: lowercase__: Optional[Any] = evaluate.load('''glue''' , '''mrpc''' ) lowercase__, lowercase__: str = get_mrpc_setup(__UpperCAmelCase , __UpperCAmelCase ) # First do baseline lowercase__, lowercase__, lowercase__: Union[str, Any] = setup['''no'''] model.to(__UpperCAmelCase ) model.eval() for batch in dataloader: batch.to(__UpperCAmelCase ) with torch.inference_mode(): lowercase__: Dict = model(**__UpperCAmelCase ) lowercase__: Dict = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__UpperCAmelCase , references=batch['''labels'''] ) lowercase__: Any = metric.compute() # Then do distributed lowercase__, lowercase__, lowercase__: List[str] = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): lowercase__: int = model(**__UpperCAmelCase ) lowercase__: str = outputs.logits.argmax(dim=-1 ) lowercase__: Any = batch['''labels'''] lowercase__, lowercase__: Optional[Any] = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__UpperCAmelCase , references=__UpperCAmelCase ) lowercase__: Any = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: lowercase__: List[str] = Accelerator(split_batches=__UpperCAmelCase , dispatch_batches=__UpperCAmelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" ) test_mrpc(__UpperCAmelCase , __UpperCAmelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: lowercase__: List[str] = Accelerator(split_batches=__UpperCAmelCase , dispatch_batches=__UpperCAmelCase ) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" ) test_torch_metrics(__UpperCAmelCase , 9_9 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) lowercase__: List[Any] = Accelerator() test_torch_metrics(__UpperCAmelCase , 5_1_2 ) accelerator.state._reset_state() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
2
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline _UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self ): torch.manual_seed(0 ) lowercase__: Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowercase__: List[Any] = DDIMScheduler() torch.manual_seed(0 ) lowercase__: Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__: Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase ) lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__: int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ): lowercase__: int = torch.manual_seed(_UpperCAmelCase ) lowercase__: List[Any] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. '''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[str] = self.get_dummy_components() lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _snake_case ( self ): super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 ) def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = '''french fries''' lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase ) lowercase__: Optional[Any] = output.images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 ) lowercase__: List[str] = output.images lowercase__: List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: int = self.get_dummy_components() lowercase__: List[str] = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: Any = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[Any] = self.get_dummy_components() lowercase__: Any = PNDMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase ) lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _UpperCAmelCase=0 ): lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase ) lowercase__: int = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Any = '''stabilityai/stable-diffusion-2-base''' lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ 0.36_968_392, 0.27_025_372, 0.32_446_766, 0.28_379_387, 0.36_363_274, 0.30_733_347, 0.27_100_027, 0.27_054_125, 0.25_536_096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase ) lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: List[str] = self.get_inputs() lowercase__: Dict = pipe(**_UpperCAmelCase ).images lowercase__: Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _snake_case ( self ): lowercase__: int = 0 def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None: lowercase__: List[str] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase__: Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Any = latents[0, -3:, -3:, -1] lowercase__: List[Any] = np.array( [ 0.18_681_869, 0.33_907_816, 0.5_361_276, 0.14_432_865, -0.02_856_611, -0.73_941_123, 0.23_397_987, 0.47_322_682, -0.37_823_164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowercase__: Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Optional[Any] = latents[0, -3:, -3:, -1] lowercase__: Any = np.array( [ 0.18_539_645, 0.33_987_248, 0.5_378_559, 0.14_437_142, -0.02_455_261, -0.7_338_317, 0.23_990_755, 0.47_356_272, -0.3_786_505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowercase__: int = False lowercase__: str = '''stabilityai/stable-diffusion-2-base''' lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _snake_case ( self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base''' lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__: Any = self.get_inputs() lowercase__: List[str] = pipe(**_UpperCAmelCase ) lowercase__: Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
2
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = "SpeechT5FeatureExtractor" _UpperCAmelCase :Optional[int] = "SpeechT5Tokenizer" def __init__( self , _UpperCAmelCase , _UpperCAmelCase ): super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ): lowercase__: Optional[Any] = kwargs.pop('''audio''' , _UpperCAmelCase ) lowercase__: Tuple = kwargs.pop('''text''' , _UpperCAmelCase ) lowercase__: Dict = kwargs.pop('''text_target''' , _UpperCAmelCase ) lowercase__: Dict = kwargs.pop('''audio_target''' , _UpperCAmelCase ) lowercase__: Union[str, Any] = kwargs.pop('''sampling_rate''' , _UpperCAmelCase ) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' ) if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' ) if audio is not None: lowercase__: List[Any] = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase ) elif text is not None: lowercase__: Optional[int] = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase ) else: lowercase__: Optional[Any] = None if audio_target is not None: lowercase__: Union[str, Any] = self.feature_extractor(audio_target=_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Tuple = targets['''input_values'''] elif text_target is not None: lowercase__: Dict = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Union[str, Any] = targets['''input_ids'''] else: lowercase__: Tuple = None if inputs is None: return targets if targets is not None: lowercase__: Union[str, Any] = labels lowercase__: Tuple = targets.get('''attention_mask''' ) if decoder_attention_mask is not None: lowercase__: Any = decoder_attention_mask return inputs def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ): lowercase__: Any = kwargs.pop('''input_values''' , _UpperCAmelCase ) lowercase__: List[Any] = kwargs.pop('''input_ids''' , _UpperCAmelCase ) lowercase__: Any = kwargs.pop('''labels''' , _UpperCAmelCase ) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' ) if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' ) if input_values is not None: lowercase__: List[str] = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) elif input_ids is not None: lowercase__: str = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase ) else: lowercase__: Tuple = None if labels is not None: if "input_ids" in labels or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and "input_ids" in labels[0]): lowercase__: Tuple = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Union[str, Any] = targets['''input_ids'''] else: lowercase__: List[Any] = self.feature_extractor.feature_size lowercase__: List[Any] = self.feature_extractor.num_mel_bins lowercase__: Union[str, Any] = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: List[Any] = feature_size_hack lowercase__: str = targets['''input_values'''] else: lowercase__: Optional[Any] = None if inputs is None: return targets if targets is not None: lowercase__: Dict = labels lowercase__: Optional[int] = targets.get('''attention_mask''' ) if decoder_attention_mask is not None: lowercase__: Any = decoder_attention_mask return inputs def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ): return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ): return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
2
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Dict = DebertaVaTokenizer _UpperCAmelCase :Tuple = DebertaVaTokenizerFast _UpperCAmelCase :int = True _UpperCAmelCase :int = True def _snake_case ( self ): super().setUp() # We have a SentencePiece fixture for testing lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[str] = '''this is a test''' lowercase__: int = '''this is a test''' return input_text, output_text def _snake_case ( self ): lowercase__: Optional[int] = '''<pad>''' lowercase__: Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(_UpperCAmelCase ) , 30001 ) def _snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _snake_case ( self ): # fmt: off lowercase__: int = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass def _snake_case ( self ): # fmt: off lowercase__: Dict = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Any = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.get_tokenizer() lowercase__: List[Any] = self.get_rust_tokenizer() lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.get_rust_tokenizer() lowercase__: str = tokenizer.encode(_UpperCAmelCase ) lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = '''This is a test''' lowercase__: str = [13, 1, 4398, 25, 21, 1289] lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # fmt: off lowercase__: str = '''I was born in 92000, and this is falsé.''' lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase ) lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' ) lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' ) lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , ) @slow def _snake_case ( self ): # fmt: off lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: return number | (1 << position) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: return number & ~(1 << position) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: return number ^ (1 << position) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> bool: return ((number >> position) & 1) == 1 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
2
"""simple docstring""" import unittest from transformers import DonutProcessor __A = "naver-clova-ix/donut-base" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__: Union[str, Any] = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__: str = self.processor.tokenajson(_UpperCAmelCase ) self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
2
1
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
2
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __A = logging.get_logger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
2
1
"""simple docstring""" import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __A = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: lowercase__: Optional[Any] = tensor_name.split('''.''' ) for split in splits[:-1]: lowercase__: str = getattr(__UpperCAmelCase , __UpperCAmelCase ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowercase__: Tuple = new_module lowercase__: Optional[int] = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) lowercase__: Tuple = tensor_name in module._buffers lowercase__: Any = getattr(__UpperCAmelCase , __UpperCAmelCase ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) lowercase__: List[str] = False lowercase__: List[Any] = False if is_buffer or not is_bitsandbytes_available(): lowercase__: Any = False lowercase__: Optional[int] = False else: lowercase__: Dict = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) lowercase__: Tuple = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: lowercase__: List[Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: lowercase__: Tuple = old_value.to(__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , torch.Tensor ): lowercase__: List[Any] = value.to('''cpu''' ) if value.dtype == torch.inta: lowercase__: List[Any] = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: lowercase__: Optional[int] = torch.tensor(__UpperCAmelCase , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , __UpperCAmelCase ) and fpaa_statistics is None: lowercase__: Optional[Any] = new_value.T lowercase__: Any = old_value.__dict__ if is_abit: lowercase__: List[str] = bnb.nn.IntaParams(__UpperCAmelCase , requires_grad=__UpperCAmelCase , **__UpperCAmelCase ).to(__UpperCAmelCase ) elif is_abit: lowercase__: str = bnb.nn.Paramsabit(__UpperCAmelCase , requires_grad=__UpperCAmelCase , **__UpperCAmelCase ).to(__UpperCAmelCase ) lowercase__: Optional[int] = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(__UpperCAmelCase ) ) else: if value is None: lowercase__: List[Any] = old_value.to(__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , torch.Tensor ): lowercase__: int = value.to(__UpperCAmelCase ) else: lowercase__: Union[str, Any] = torch.tensor(__UpperCAmelCase , device=__UpperCAmelCase ) if is_buffer: lowercase__: List[str] = new_value else: lowercase__: int = nn.Parameter(__UpperCAmelCase , requires_grad=old_value.requires_grad ) lowercase__: Optional[Any] = new_value def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> Union[str, Any]: for name, module in model.named_children(): if current_key_name is None: lowercase__: List[Any] = [] current_key_name.append(__UpperCAmelCase ) if (isinstance(__UpperCAmelCase , nn.Linear ) or isinstance(__UpperCAmelCase , __UpperCAmelCase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(__UpperCAmelCase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowercase__, lowercase__: List[str] = module.weight.shape else: lowercase__: Union[str, Any] = module.in_features lowercase__: Dict = module.out_features if quantization_config.quantization_method() == "llm_int8": lowercase__: Union[str, Any] = bnb.nn.LinearabitLt( __UpperCAmelCase , __UpperCAmelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) lowercase__: str = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: lowercase__: int = bnb.nn.Linearabit( __UpperCAmelCase , __UpperCAmelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) lowercase__: Tuple = True # Store the module class in case we need to transpose the weight later lowercase__: Optional[int] = type(__UpperCAmelCase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(__UpperCAmelCase ) if len(list(module.children() ) ) > 0: lowercase__, lowercase__: List[str] = _replace_with_bnb_linear( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , has_been_replaced=__UpperCAmelCase , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> int: lowercase__: Tuple = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert lowercase__, lowercase__: str = _replace_with_bnb_linear( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE__ ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , __UpperCAmelCase , ) return replace_with_bnb_linear(*__UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , __UpperCAmelCase , ) return set_module_quantized_tensor_to_device(*__UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any: lowercase__: Dict = deepcopy(__UpperCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() lowercase__: List[Any] = find_tied_parameters(__UpperCAmelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowercase__: Dict = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowercase__: Dict = sum(__UpperCAmelCase , [] ) lowercase__: List[str] = len(__UpperCAmelCase ) > 0 # Check if it is a base model lowercase__: str = not hasattr(__UpperCAmelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowercase__: List[Any] = list(model.named_children() ) lowercase__: Tuple = [list_modules[-1][0]] # add last module together with tied weights lowercase__: Optional[int] = set(__UpperCAmelCase ) - set(__UpperCAmelCase ) lowercase__: Tuple = list(set(__UpperCAmelCase ) ) + list(__UpperCAmelCase ) # remove ".weight" from the keys lowercase__: Any = ['''.weight''', '''.bias'''] lowercase__: List[Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowercase__: Optional[Any] = name.replace(__UpperCAmelCase , '''''' ) filtered_module_names.append(__UpperCAmelCase ) return filtered_module_names
2
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = 2_5_6 class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = ["melgan"] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): super().__init__() # From MELGAN lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training. lowercase__: Union[str, Any] = 4.0 # Largest value for most examples lowercase__: Union[str, Any] = 128 self.register_modules( notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: int = output_range if clip: lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: str = input_range lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs # Scale to [0, 1]. lowercase__: Tuple = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = input_tokens > 0 lowercase__, lowercase__: str = self.notes_encoder( encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.continuous_encoder( encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = noise_time if not torch.is_tensor(_UpperCAmelCase ): lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0: lowercase__: str = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__: Union[str, Any] = self.decoder( encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase ) return logits @torch.no_grad() def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(_UpperCAmelCase )}.""" ) lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) for i, encoder_input_tokens in enumerate(_UpperCAmelCase ): if i == 0: lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. lowercase__: Union[str, Any] = ones lowercase__: str = self.scale_features( _UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase ) lowercase__: Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__: int = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__: List[Any] = self.decode( encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] ) lowercase__: Dict = mel[:1] lowercase__: List[Any] = mel.cpu().float().numpy() lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCAmelCase , _UpperCAmelCase ) logger.info('''Generated segment''' , _UpperCAmelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__: Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_UpperCAmelCase )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int: lowercase__: str = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
2
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __A = logging.get_logger(__name__) __A = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = "bloom" _UpperCAmelCase :List[str] = ["past_key_values"] _UpperCAmelCase :Optional[Any] = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: Any = vocab_size # Backward compatibility with n_embed kwarg lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase ) lowercase__: int = hidden_size if n_embed is None else n_embed lowercase__: int = n_layer lowercase__: int = n_head lowercase__: Optional[Any] = layer_norm_epsilon lowercase__: int = initializer_range lowercase__: List[Any] = use_cache lowercase__: str = pretraining_tp lowercase__: Tuple = apply_residual_connection_post_layernorm lowercase__: int = hidden_dropout lowercase__: Optional[Any] = attention_dropout lowercase__: int = bos_token_id lowercase__: Union[str, Any] = eos_token_id lowercase__: Any = slow_but_exact super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = version.parse("1.12" ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase ) lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: str = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head @property def _snake_case ( self ): return 1e-3 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Tuple = seqlen + 2 lowercase__: str = self._config.hidden_size // self.num_attention_heads lowercase__: Optional[int] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase__: Union[str, Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase__: str = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Tuple = common_inputs['''attention_mask'''] if self.use_past: lowercase__: int = ordered_inputs['''attention_mask'''].dtype lowercase__: List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: lowercase__: Tuple = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__: List[str] = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) lowercase__: Any = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) lowercase__: Dict = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) lowercase__: Optional[Any] = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) lowercase__: Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) lowercase__: Union[str, Any] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) lowercase__: Tuple = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) lowercase__: Dict = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) lowercase__: str = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) lowercase__: Optional[Any] = key.replace('''image_encoder.module''' , '''flava.image_model''' ) lowercase__: Optional[Any] = key.replace('''text_encoder.module''' , '''flava.text_model''' ) lowercase__: Union[str, Any] = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) lowercase__: int = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) lowercase__: Union[str, Any] = key.replace('''text_projection''' , '''flava.text_projection''' ) lowercase__: Union[str, Any] = key.replace('''image_projection''' , '''flava.image_projection''' ) lowercase__: List[Any] = value.float() for key, value in codebook_state_dict.items(): lowercase__: List[Any] = value return upgrade @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Union[str, Any]: if config_path is not None: lowercase__: Any = FlavaConfig.from_pretrained(__UpperCAmelCase ) else: lowercase__: Any = FlavaConfig() lowercase__: str = FlavaForPreTraining(__UpperCAmelCase ).eval() lowercase__: int = convert_dalle_checkpoint(__UpperCAmelCase , __UpperCAmelCase , save_checkpoint=__UpperCAmelCase ) if os.path.exists(__UpperCAmelCase ): lowercase__: Optional[int] = torch.load(__UpperCAmelCase , map_location='''cpu''' ) else: lowercase__: int = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='''cpu''' ) lowercase__: Tuple = upgrade_state_dict(__UpperCAmelCase , __UpperCAmelCase ) hf_model.load_state_dict(__UpperCAmelCase ) lowercase__: List[Any] = hf_model.state_dict() lowercase__: List[str] = count_parameters(__UpperCAmelCase ) lowercase__: Optional[int] = count_parameters(__UpperCAmelCase ) + count_parameters(__UpperCAmelCase ) assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) hf_model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
2
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): lowercase__: Dict = parent lowercase__: Optional[int] = batch_size lowercase__: List[str] = seq_length lowercase__: Optional[int] = is_training lowercase__: Dict = use_input_mask lowercase__: List[Any] = use_token_type_ids lowercase__: List[str] = use_labels lowercase__: Union[str, Any] = vocab_size lowercase__: str = hidden_size lowercase__: Any = embedding_size lowercase__: Any = num_hidden_layers lowercase__: Any = num_attention_heads lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: Optional[int] = max_position_embeddings lowercase__: List[Any] = type_vocab_size lowercase__: Tuple = type_sequence_label_size lowercase__: Optional[int] = initializer_range lowercase__: Dict = num_labels lowercase__: int = num_choices lowercase__: int = scope def _snake_case ( self ): lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: List[Any] = None if self.use_input_mask: lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[Any] = None if self.use_token_type_ids: lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: Optional[Any] = None lowercase__: Any = None lowercase__: str = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase__: Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ): return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = MobileBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: str = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: str = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = self.num_choices lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): lowercase__: Optional[int] = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ): Union[str, Any] = config_and_inputs lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Optional[Any] = True def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ): lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): lowercase__: Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) lowercase__: Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def _snake_case ( self ): lowercase__: int = MobileBertModelTester(self ) lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: return torch.tensor( __UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase , ) __A = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase ) lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): lowercase__: Tuple = model(_UpperCAmelCase )[0] lowercase__: Dict = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor( [ [ [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5], [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0], [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1], ] ] , device=_UpperCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
2
1
"""simple docstring""" __A = [ (1_0_0_0, "M"), (9_0_0, "CM"), (5_0_0, "D"), (4_0_0, "CD"), (1_0_0, "C"), (9_0, "XC"), (5_0, "L"), (4_0, "XL"), (1_0, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"), ] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__: List[Any] = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0} lowercase__: Any = 0 lowercase__: Optional[int] = 0 while place < len(__UpperCAmelCase ): if (place + 1 < len(__UpperCAmelCase )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: lowercase__: List[Any] = [] for arabic, roman in ROMAN: ((lowercase__), (lowercase__)): List[Any] = divmod(__UpperCAmelCase , __UpperCAmelCase ) result.append(roman * factor ) if number == 0: break return "".join(__UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
2
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = "unispeech-sat" def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowercase__: Union[str, Any] = hidden_size lowercase__: Union[str, Any] = feat_extract_norm lowercase__: Any = feat_extract_activation lowercase__: List[Any] = list(_UpperCAmelCase ) lowercase__: Optional[int] = list(_UpperCAmelCase ) lowercase__: int = list(_UpperCAmelCase ) lowercase__: Any = conv_bias lowercase__: List[str] = num_conv_pos_embeddings lowercase__: List[str] = num_conv_pos_embedding_groups lowercase__: int = len(self.conv_dim ) lowercase__: Dict = num_hidden_layers lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: Optional[Any] = num_attention_heads lowercase__: Union[str, Any] = hidden_dropout lowercase__: List[Any] = attention_dropout lowercase__: str = activation_dropout lowercase__: Optional[Any] = feat_proj_dropout lowercase__: Optional[int] = final_dropout lowercase__: Any = layerdrop lowercase__: int = layer_norm_eps lowercase__: Any = initializer_range lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[Any] = num_clusters lowercase__: Dict = do_stable_layer_norm lowercase__: List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__: Dict = apply_spec_augment lowercase__: Union[str, Any] = mask_time_prob lowercase__: List[str] = mask_time_length lowercase__: Union[str, Any] = mask_time_min_masks lowercase__: str = mask_feature_prob lowercase__: Dict = mask_feature_length lowercase__: List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__: Tuple = num_codevectors_per_group lowercase__: Optional[Any] = num_codevector_groups lowercase__: int = contrastive_logits_temperature lowercase__: Any = feat_quantizer_dropout lowercase__: int = num_negatives lowercase__: Optional[Any] = codevector_dim lowercase__: int = proj_codevector_dim lowercase__: str = diversity_loss_weight # ctc loss lowercase__: int = ctc_loss_reduction lowercase__: Union[str, Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__: Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = list(_UpperCAmelCase ) lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
2
1
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model", } } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<sep>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<cls>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<eop>", "<eod>"] , _UpperCAmelCase = None , **_UpperCAmelCase , ): lowercase__: Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token lowercase__: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) lowercase__: Union[str, Any] = 3 lowercase__: Dict = do_lower_case lowercase__: Tuple = remove_space lowercase__: Optional[Any] = keep_accents lowercase__: Optional[Any] = vocab_file lowercase__: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. ''' '''See https://pypi.org/project/jieba/ for installation.''' ) lowercase__: Union[str, Any] = jieba lowercase__: Any = str.maketrans(''' \n''' , '''\u2582\u2583''' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _snake_case ( self ): return len(self.sp_model ) def _snake_case ( self ): lowercase__: int = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): lowercase__: int = self.__dict__.copy() lowercase__: Optional[int] = None return state def __setstate__( self , _UpperCAmelCase ): lowercase__: int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__: Union[str, Any] = {} lowercase__: Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _UpperCAmelCase ): if self.remove_space: lowercase__: Optional[int] = ''' '''.join(inputs.strip().split() ) else: lowercase__: Optional[int] = inputs lowercase__: Any = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowercase__: Union[str, Any] = unicodedata.normalize('''NFKD''' , _UpperCAmelCase ) lowercase__: Optional[Any] = ''''''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: lowercase__: Optional[int] = outputs.lower() return outputs def _snake_case ( self , _UpperCAmelCase ): lowercase__: Dict = self.preprocess_text(_UpperCAmelCase ) lowercase__: int = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) lowercase__: Union[str, Any] = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowercase__: Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase__: Tuple = cur_pieces[1:] else: lowercase__: str = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def _snake_case ( self , _UpperCAmelCase ): return self.sp_model.PieceToId(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): return self.sp_model.IdToPiece(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[Any] = ''''''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ''' ''' ).strip() return out_string def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[int] = [self.sep_token_id] lowercase__: Tuple = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: List[str] = [self.sep_token_id] lowercase__: Union[str, Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__: List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , '''wb''' ) as fi: lowercase__: Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ): lowercase__: List[str] = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Dict = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' ) return text
2
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) __A = parser.parse_args() __A = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = BlenderbotConfig _UpperCAmelCase :Dict = {} _UpperCAmelCase :Tuple = "gelu" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ): lowercase__: Optional[Any] = parent lowercase__: Any = batch_size lowercase__: Optional[Any] = seq_length lowercase__: Optional[int] = is_training lowercase__: Optional[Any] = use_labels lowercase__: List[Any] = vocab_size lowercase__: Tuple = hidden_size lowercase__: str = num_hidden_layers lowercase__: str = num_attention_heads lowercase__: Dict = intermediate_size lowercase__: Dict = hidden_dropout_prob lowercase__: Any = attention_probs_dropout_prob lowercase__: Any = max_position_embeddings lowercase__: str = eos_token_id lowercase__: List[str] = pad_token_id lowercase__: List[Any] = bos_token_id def _snake_case ( self ): lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__: Tuple = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__: List[str] = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = TFBlenderbotModel(config=_UpperCAmelCase ).get_decoder() lowercase__: Dict = inputs_dict['''input_ids'''] lowercase__: List[Any] = input_ids[:1, :] lowercase__: int = inputs_dict['''attention_mask'''][:1, :] lowercase__: Union[str, Any] = inputs_dict['''head_mask'''] lowercase__: List[Any] = 1 # first forward pass lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) lowercase__, lowercase__: Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase__: Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__: Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__: Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__: str = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__: str = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__: List[Any] = output_from_no_past[:, -3:, random_slice_idx] lowercase__: int = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> List[str]: if attention_mask is None: lowercase__: Union[str, Any] = tf.cast(tf.math.not_equal(__UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__: Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__: int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__: Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :str = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () _UpperCAmelCase :Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () _UpperCAmelCase :Optional[Any] = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) _UpperCAmelCase :List[str] = True _UpperCAmelCase :Dict = False _UpperCAmelCase :Optional[Any] = False def _snake_case ( self ): lowercase__: Optional[Any] = TFBlenderbotModelTester(self ) lowercase__: Any = ConfigTester(self , config_class=_UpperCAmelCase ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_tokenizers @require_tf class UpperCAmelCase (unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = ["My friends are cool but they eat too many carbs."] _UpperCAmelCase :str = "facebook/blenderbot-400M-distill" @cached_property def _snake_case ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def _snake_case ( self ): lowercase__: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def _snake_case ( self ): lowercase__: List[str] = self.tokenizer(self.src_text , return_tensors='''tf''' ) lowercase__: Union[str, Any] = self.model.generate( model_inputs.input_ids , ) lowercase__: Any = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
2
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
2
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json", "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json", "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json", "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json", "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json", "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json", "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json", "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json", "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json", "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = "xlm" _UpperCAmelCase :List[Any] = { "hidden_size": "emb_dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", "n_words": "vocab_size", # For backward compatibility } def __init__( self , _UpperCAmelCase=30145 , _UpperCAmelCase=2048 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=1 , _UpperCAmelCase=True , _UpperCAmelCase=512 , _UpperCAmelCase=2048**-0.5 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=5 , _UpperCAmelCase=True , _UpperCAmelCase="first" , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=0.1 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=0 , **_UpperCAmelCase , ): lowercase__: Optional[Any] = vocab_size lowercase__: Any = emb_dim lowercase__: Optional[int] = n_layers lowercase__: Optional[int] = n_heads lowercase__: Dict = dropout lowercase__: str = attention_dropout lowercase__: Union[str, Any] = gelu_activation lowercase__: Union[str, Any] = sinusoidal_embeddings lowercase__: Union[str, Any] = causal lowercase__: int = asm lowercase__: Tuple = n_langs lowercase__: Tuple = use_lang_emb lowercase__: Union[str, Any] = layer_norm_eps lowercase__: List[str] = bos_index lowercase__: Optional[Any] = eos_index lowercase__: int = pad_index lowercase__: Union[str, Any] = unk_index lowercase__: Union[str, Any] = mask_index lowercase__: Tuple = is_encoder lowercase__: Dict = max_position_embeddings lowercase__: Optional[int] = embed_init_std lowercase__: Tuple = init_std lowercase__: List[Any] = summary_type lowercase__: List[str] = summary_use_proj lowercase__: Optional[int] = summary_activation lowercase__: Any = summary_proj_to_labels lowercase__: Any = summary_first_dropout lowercase__: List[str] = start_n_top lowercase__: Any = end_n_top lowercase__: Optional[Any] = mask_token_id lowercase__: Union[str, Any] = lang_id if "n_words" in kwargs: lowercase__: Tuple = kwargs['''n_words'''] super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" @property def _snake_case ( self ): if self.task == "multiple-choice": lowercase__: int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase__: int = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
2
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A = logging.get_logger(__name__) __A = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = "codegen" _UpperCAmelCase :Optional[int] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: int = vocab_size lowercase__: str = n_ctx lowercase__: List[Any] = n_positions lowercase__: Union[str, Any] = n_embd lowercase__: Optional[Any] = n_layer lowercase__: str = n_head lowercase__: List[Any] = n_inner lowercase__: Union[str, Any] = rotary_dim lowercase__: Optional[Any] = activation_function lowercase__: Union[str, Any] = resid_pdrop lowercase__: Optional[int] = embd_pdrop lowercase__: Optional[Any] = attn_pdrop lowercase__: Optional[int] = layer_norm_epsilon lowercase__: List[Any] = initializer_range lowercase__: Tuple = use_cache lowercase__: Any = bos_token_id lowercase__: Any = eos_token_id super().__init__( bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' ) lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Any = seqlen + 2 lowercase__: List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__: Optional[Any] = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Optional[Any] = common_inputs['''attention_mask'''] if self.use_past: lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype lowercase__: List[Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: while second != 0: lowercase__: List[Any] = first & second first ^= second lowercase__: List[Any] = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() __A = int(input("Enter the first number: ").strip()) __A = int(input("Enter the second number: ").strip()) print(f'''{add(first, second) = }''')
2
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = field( metadata={"help": "The output directory where the model will be written."} ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } ,) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: lowercase__: Dict = HfArgumentParser((ModelArguments,) ) ((lowercase__), ): List[str] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowercase__: Tuple = True lowercase__: int = True lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowercase__: int = decoder_config.decoder_start_token_id lowercase__: Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: lowercase__: Tuple = decoder_config.bos_token_id if pad_token_id is None: lowercase__: Optional[int] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowercase__: Optional[Any] = decoder_config.eos_token_id lowercase__: Tuple = decoder_start_token_id lowercase__: Dict = pad_token_id lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
2
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = ["pixel_values"] def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: str = size if size is not None else {'''shortest_edge''': 224} lowercase__: List[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) lowercase__: List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase__: Dict = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name='''crop_size''' ) lowercase__: Optional[Any] = do_resize lowercase__: Any = size lowercase__: Optional[Any] = resample lowercase__: List[Any] = do_center_crop lowercase__: Optional[Any] = crop_size lowercase__: Optional[Any] = do_rescale lowercase__: Any = rescale_factor lowercase__: List[Any] = do_normalize lowercase__: str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase__: Dict = image_std if image_std is not None else OPENAI_CLIP_STD lowercase__: str = do_convert_rgb def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ): lowercase__: Tuple = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) lowercase__: Dict = get_resize_output_image_size(_UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCAmelCase ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): lowercase__: Dict = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" ) return center_crop(_UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ): return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ): lowercase__: List[Any] = do_resize if do_resize is not None else self.do_resize lowercase__: Optional[int] = size if size is not None else self.size lowercase__: Optional[int] = get_size_dict(_UpperCAmelCase , param_name='''size''' , default_to_square=_UpperCAmelCase ) lowercase__: str = resample if resample is not None else self.resample lowercase__: Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__: List[str] = crop_size if crop_size is not None else self.crop_size lowercase__: Dict = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' , default_to_square=_UpperCAmelCase ) lowercase__: Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase__: int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__: str = do_normalize if do_normalize is not None else self.do_normalize lowercase__: List[Any] = image_mean if image_mean is not None else self.image_mean lowercase__: int = image_std if image_std is not None else self.image_std lowercase__: int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__: Tuple = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__: Union[str, Any] = [convert_to_rgb(_UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. lowercase__: Optional[int] = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: lowercase__: str = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_center_crop: lowercase__: Optional[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images] if do_rescale: lowercase__: Any = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: lowercase__: Optional[Any] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] lowercase__: Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] lowercase__: Dict = {'''pixel_values''': images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "ctrl" _UpperCAmelCase :int = ["past_key_values"] _UpperCAmelCase :Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ): lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[int] = n_positions lowercase__: Optional[int] = n_embd lowercase__: Any = n_layer lowercase__: Any = n_head lowercase__: int = dff lowercase__: Dict = resid_pdrop lowercase__: Any = embd_pdrop lowercase__: Any = layer_norm_epsilon lowercase__: Optional[int] = initializer_range lowercase__: Dict = use_cache super().__init__(**_UpperCAmelCase )
2
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json", # See all REALM models at https://huggingface.co/models?filter=realm } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = "realm" def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=128 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=8 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=256 , _UpperCAmelCase=10 , _UpperCAmelCase=1e-3 , _UpperCAmelCase=5 , _UpperCAmelCase=320 , _UpperCAmelCase=13353718 , _UpperCAmelCase=5000 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ): super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) # Common config lowercase__: int = vocab_size lowercase__: Tuple = max_position_embeddings lowercase__: Optional[int] = hidden_size lowercase__: str = retriever_proj_size lowercase__: Dict = num_hidden_layers lowercase__: Union[str, Any] = num_attention_heads lowercase__: Optional[Any] = num_candidates lowercase__: Dict = intermediate_size lowercase__: Optional[int] = hidden_act lowercase__: str = hidden_dropout_prob lowercase__: Optional[Any] = attention_probs_dropout_prob lowercase__: Any = initializer_range lowercase__: str = type_vocab_size lowercase__: str = layer_norm_eps # Reader config lowercase__: Tuple = span_hidden_size lowercase__: Optional[Any] = max_span_width lowercase__: Tuple = reader_layer_norm_eps lowercase__: Tuple = reader_beam_size lowercase__: Optional[int] = reader_seq_len # Retrieval config lowercase__: int = num_block_records lowercase__: Any = searcher_beam_size
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int: lowercase__: str = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
2
1
"""simple docstring""" import argparse from collections import defaultdict import yaml __A = "docs/source/en/_toctree.yml" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict: lowercase__: int = defaultdict(__UpperCAmelCase ) for doc in model_doc: counts[doc["local"]] += 1 lowercase__: List[str] = [key for key, value in counts.items() if value > 1] lowercase__: Optional[Any] = [] for duplicate_key in duplicates: lowercase__: Optional[int] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(__UpperCAmelCase ) > 1: raise ValueError( F"""{duplicate_key} is present several times in the documentation table of content at """ '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : s["title"].lower() ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase=False ) -> str: with open(__UpperCAmelCase , encoding='''utf-8''' ) as f: lowercase__: Dict = yaml.safe_load(f.read() ) # Get to the API doc lowercase__: int = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase__: int = content[api_idx]['''sections'''] # Then to the model doc lowercase__: List[Any] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowercase__: Union[str, Any] = api_doc[model_idx]['''sections'''] lowercase__: int = [(idx, section) for idx, section in enumerate(__UpperCAmelCase ) if '''sections''' in section] lowercase__: List[Any] = False for idx, modality_doc in modalities_docs: lowercase__: List[Any] = modality_doc['''sections'''] lowercase__: List[Any] = clean_model_doc_toc(__UpperCAmelCase ) if old_modality_doc != new_modality_doc: lowercase__: Dict = True if overwrite: lowercase__: int = new_modality_doc if diff: if overwrite: lowercase__: Optional[Any] = model_doc lowercase__: List[str] = api_doc with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__UpperCAmelCase , allow_unicode=__UpperCAmelCase ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __A = parser.parse_args() check_model_doc(args.fix_and_overwrite)
2
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ): lowercase__: int = bp_numa lowercase__: Union[str, Any] = bp_numa lowercase__: List[str] = bp_numa lowercase__: str = conva_get[:2] lowercase__: Union[str, Any] = conva_get[2] lowercase__: Any = size_pa lowercase__: Optional[Any] = rate_w lowercase__: Tuple = rate_t lowercase__: List[str] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , _UpperCAmelCase ): # save model dict with pickle lowercase__: int = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_UpperCAmelCase , '''wb''' ) as f: pickle.dump(_UpperCAmelCase , _UpperCAmelCase ) print(F"""Model saved: {save_path}""" ) @classmethod def _snake_case ( cls , _UpperCAmelCase ): # read saved model with open(_UpperCAmelCase , '''rb''' ) as f: lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301 lowercase__: Tuple = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) lowercase__: Any = model_dic.get('''size_pooling1''' ) lowercase__: int = model_dic.get('''num_bp1''' ) lowercase__: Optional[int] = model_dic.get('''num_bp2''' ) lowercase__: str = model_dic.get('''num_bp3''' ) lowercase__: Any = model_dic.get('''rate_weight''' ) lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' ) # create model instance lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # modify model parameter lowercase__: Dict = model_dic.get('''w_conv1''' ) lowercase__: Dict = model_dic.get('''wkj''' ) lowercase__: str = model_dic.get('''vji''' ) lowercase__: List[Any] = model_dic.get('''thre_conv1''' ) lowercase__: Optional[int] = model_dic.get('''thre_bp2''' ) lowercase__: Tuple = model_dic.get('''thre_bp3''' ) return conv_ins def _snake_case ( self , _UpperCAmelCase ): return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , _UpperCAmelCase ): return round(_UpperCAmelCase , 3 ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # convolution process lowercase__: Any = convs[0] lowercase__: Tuple = convs[1] lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0] # get the data slice of original image data, data_focus lowercase__: List[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): lowercase__: Tuple = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_UpperCAmelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__: Optional[int] = [] lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_UpperCAmelCase ): lowercase__: str = [] for i_focus in range(len(_UpperCAmelCase ) ): lowercase__: Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape( _UpperCAmelCase , _UpperCAmelCase ) data_featuremap.append(_UpperCAmelCase ) # expanding the data slice to One dimenssion lowercase__: Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) ) lowercase__: Any = np.asarray(_UpperCAmelCase ) return focus_list, data_featuremap def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ): # pooling process lowercase__: List[Any] = len(featuremaps[0] ) lowercase__: Any = int(size_map / size_pooling ) lowercase__: List[Any] = [] for i_map in range(len(_UpperCAmelCase ) ): lowercase__: Any = featuremaps[i_map] lowercase__: Tuple = [] for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_UpperCAmelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ) featuremap_pooled.append(_UpperCAmelCase ) return featuremap_pooled def _snake_case ( self , _UpperCAmelCase ): # expanding three dimension data to one dimension list lowercase__: Optional[Any] = [] for i in range(len(_UpperCAmelCase ) ): lowercase__: Any = np.shape(data[i] ) lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) lowercase__: List[str] = data_listed.getA().tolist()[0] data_expanded.extend(_UpperCAmelCase ) lowercase__: List[str] = np.asarray(_UpperCAmelCase ) return data_expanded def _snake_case ( self , _UpperCAmelCase ): # expanding matrix to one dimension list lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase ) lowercase__: List[str] = np.shape(_UpperCAmelCase ) lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = [] lowercase__: List[str] = 0 for i_map in range(_UpperCAmelCase ): lowercase__: Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = pd_pool[ i_pool ] lowercase__: List[Any] = i_pool + 1 lowercase__: str = np.multiply( _UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_UpperCAmelCase ) return pd_all def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) ) print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) ) lowercase__: Tuple = 0 lowercase__: Tuple = [] lowercase__: Optional[int] = 10000 while rp < n_repeat and mse >= error_accuracy: lowercase__: Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(_UpperCAmelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__: List[Any] = np.asmatrix(datas_train[p] ) lowercase__: Optional[int] = np.asarray(datas_teach[p] ) lowercase__, lowercase__: List[str] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: int = np.shape(_UpperCAmelCase ) lowercase__: Optional[Any] = self._expand(_UpperCAmelCase ) lowercase__: Any = data_bp_input lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa lowercase__: str = self.sig(_UpperCAmelCase ) lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa lowercase__: Dict = self.sig(_UpperCAmelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__: str = np.multiply( (data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: str = np.multiply( np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji ) lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__: List[str] = pd_conva_pooled.T.getA().tolist() lowercase__: Optional[Any] = self._calculate_gradient_from_pool( _UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__: str = self._expand_mat(pd_conva_all[k_conv] ) lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__: List[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__: str = rp + 1 lowercase__: Optional[Any] = error_count / patterns all_mse.append(_UpperCAmelCase ) def draw_error(): lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_UpperCAmelCase , '''+-''' ) plt.plot(_UpperCAmelCase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_UpperCAmelCase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def _snake_case ( self , _UpperCAmelCase ): # model predict lowercase__: Union[str, Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) ) for p in range(len(_UpperCAmelCase ) ): lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] ) lowercase__, lowercase__: Any = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: str = self._expand(_UpperCAmelCase ) lowercase__: List[Any] = data_bp_input lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out] return np.asarray(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # return the data of image after convoluting process so we can check it out lowercase__: int = np.asmatrix(_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
2
1
"""simple docstring""" import heapq import sys import numpy as np __A = tuple[int, int] class UpperCAmelCase : """simple docstring""" def __init__( self ): lowercase__: Dict = [] lowercase__: List[str] = set() def _snake_case ( self ): if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def _snake_case ( self ): return len(self.elements ) == 0 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(_UpperCAmelCase ) else: # update # print("update", item) lowercase__: Tuple = [] ((lowercase__), (lowercase__)): Dict = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((lowercase__), (lowercase__)): Optional[Any] = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _snake_case ( self , _UpperCAmelCase ): if item in self.set: self.set.remove(_UpperCAmelCase ) lowercase__: List[Any] = [] ((lowercase__), (lowercase__)): Dict = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((lowercase__), (lowercase__)): Optional[int] = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _snake_case ( self ): return self.elements[0][1] def _snake_case ( self ): ((lowercase__), (lowercase__)): Any = heapq.heappop(self.elements ) self.set.remove(_UpperCAmelCase ) return (priority, item) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: # euclidean distance lowercase__: Dict = np.array(__UpperCAmelCase ) lowercase__: List[Any] = np.array(__UpperCAmelCase ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: # integer division by time variable return consistent_heuristic(__UpperCAmelCase , __UpperCAmelCase ) // t def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: lowercase__: Optional[int] = g_function[start] + Wa * heuristics[i](__UpperCAmelCase , __UpperCAmelCase ) return ans def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: lowercase__: Tuple = np.chararray((n, n) ) for i in range(__UpperCAmelCase ): for j in range(__UpperCAmelCase ): lowercase__: Dict = '''*''' for i in range(__UpperCAmelCase ): for j in range(__UpperCAmelCase ): if (j, (n - 1) - i) in blocks: lowercase__: Dict = '''#''' lowercase__: Any = '''-''' lowercase__: Optional[Any] = back_pointer[goal] while x != start: ((lowercase__), (lowercase__)): Optional[int] = x # print(x) lowercase__: Optional[Any] = '''-''' lowercase__: List[str] = back_pointer[x] lowercase__: List[Any] = '''-''' for i in range(__UpperCAmelCase ): for j in range(__UpperCAmelCase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) lowercase__: List[str] = back_pointer[goal] while x != start: print(__UpperCAmelCase , end=''' ''' ) lowercase__: int = back_pointer[x] print(__UpperCAmelCase ) sys.exit() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str: for itera in range(__UpperCAmelCase ): open_list[itera].remove_element(__UpperCAmelCase ) # print("s", s) # print("j", j) ((lowercase__), (lowercase__)): Optional[Any] = s lowercase__: Any = (x - 1, y) lowercase__: int = (x + 1, y) lowercase__: Union[str, Any] = (x, y + 1) lowercase__: int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__UpperCAmelCase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__UpperCAmelCase ) lowercase__: List[str] = -1 lowercase__: Any = float('''inf''' ) if valid(__UpperCAmelCase ) and g_function[neighbours] > g_function[s] + 1: lowercase__: int = g_function[s] + 1 lowercase__: Union[str, Any] = s if neighbours not in close_list_anchor: open_list[0].put(__UpperCAmelCase , key(__UpperCAmelCase , 0 , __UpperCAmelCase , __UpperCAmelCase ) ) if neighbours not in close_list_inad: for var in range(1 , __UpperCAmelCase ): if key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) <= Wa * key( __UpperCAmelCase , 0 , __UpperCAmelCase , __UpperCAmelCase ): open_list[j].put( __UpperCAmelCase , key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: lowercase__: Optional[Any] = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list __A = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __A = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (1_0, 1), (1_1, 1), (1_2, 1), (1_3, 1), (1_4, 1), (1_5, 1), (1_6, 1), (1_7, 1), (1_8, 1), (1_9, 1), ] __A = make_common_ground() __A = blocks_blk # hyper parameters __A = 1 __A = 1 __A = 2_0 __A = 3 # one consistent and two other inconsistent # start and end destination __A = (0, 0) __A = (n - 1, n - 1) __A = 1 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: lowercase__: Any = {start: 0, goal: float('''inf''' )} lowercase__: int = {start: -1, goal: -1} lowercase__: Dict = [] lowercase__: Dict = set() for i in range(__UpperCAmelCase ): open_list.append(PriorityQueue() ) open_list[i].put(__UpperCAmelCase , key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) ) lowercase__: list[int] = [] lowercase__: list[int] = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , __UpperCAmelCase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowercase__, lowercase__: Optional[Any] = open_list[i].top_show() visited.add(__UpperCAmelCase ) expand_state( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) close_list_inad.append(__UpperCAmelCase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowercase__: Optional[Any] = open_list[0].top_show() visited.add(__UpperCAmelCase ) expand_state( __UpperCAmelCase , 0 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) close_list_anchor.append(__UpperCAmelCase ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__UpperCAmelCase ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
2
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = CTRLTokenizer _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Optional[int] = '''adapt react readapt apt''' return input_text, output_text def _snake_case ( self ): lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = tokens + [tokenizer.unk_token] lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
2
1
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: return "".join(sorted(__UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[str]: return word_by_signature[signature(__UpperCAmelCase )] __A = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8") __A = sorted({word.strip().lower() for word in data.splitlines()}) __A = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": __A = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("anagrams.txt", "w") as file: file.write("all_anagrams = \n ") file.write(pprint.pformat(all_anagrams))
2
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger __A = "<<<<<<< This should probably be modified because it mentions: " __A = "=======\n>>>>>>>\n" __A = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] __A = [ # (pattern, replacement) # Order is important here for some replacements (R"tfds\.core", R"datasets"), (R"tf\.io\.gfile\.GFile", R"open"), (R"tf\.([\w\d]+)", R"datasets.Value('\1')"), (R"tfds\.features\.Text\(\)", R"datasets.Value('string')"), (R"tfds\.features\.Text\(", R"datasets.Value('string'),"), (R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("), (R"tfds\.features\.FeaturesDict\(", R"dict("), (R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (R"tfds\.", R"datasets."), (R"dl_manager\.manual_dir", R"self.config.data_dir"), (R"self\.builder_config", R"self.config"), ] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: return ConvertCommand(args.tfds_path , args.datasets_directory ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" @staticmethod def _snake_case ( _UpperCAmelCase ): lowercase__: int = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ): lowercase__: List[str] = get_logger('''datasets-cli/converting''' ) lowercase__: Optional[Any] = tfds_path lowercase__: Dict = datasets_directory def _snake_case ( self ): if os.path.isdir(self._tfds_path ): lowercase__: Optional[Any] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__: Optional[int] = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' ) lowercase__: int = os.path.abspath(self._datasets_directory ) self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) lowercase__: Tuple = [] lowercase__: Dict = [] lowercase__: Any = {} if os.path.isdir(self._tfds_path ): lowercase__: Dict = os.listdir(_UpperCAmelCase ) else: lowercase__: Dict = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"""Looking at file {f_name}""" ) lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: lowercase__: Tuple = f.readlines() lowercase__: Optional[Any] = [] lowercase__: Dict = False lowercase__: List[str] = False lowercase__: List[Any] = [] for line in lines: lowercase__: List[str] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__: Optional[int] = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here lowercase__: Dict = '''''' continue elif "from absl import logging" in out_line: lowercase__: Tuple = '''from datasets import logging\n''' elif "getLogger" in out_line: lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__: Any = True lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' ) out_lines.append(_UpperCAmelCase ) out_lines.append(_UpperCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) lowercase__: List[str] = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__: Optional[Any] = True out_lines.append(_UpperCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__: Dict = f_name.replace('''.py''' , '''''' ) lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) self._logger.info(F"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_UpperCAmelCase ) if needs_manual_update: with_manual_update.append(_UpperCAmelCase ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.writelines(_UpperCAmelCase ) self._logger.info(F"""Converted in {output_file}""" ) for utils_file in utils_files: try: lowercase__: str = os.path.basename(_UpperCAmelCase ) lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(_UpperCAmelCase , _UpperCAmelCase ) except KeyError: self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
2
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "cvt" def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Dict = num_channels lowercase__: str = patch_sizes lowercase__: Optional[Any] = patch_stride lowercase__: List[str] = patch_padding lowercase__: Optional[Any] = embed_dim lowercase__: Optional[int] = num_heads lowercase__: Any = depth lowercase__: str = mlp_ratio lowercase__: Any = attention_drop_rate lowercase__: Any = drop_rate lowercase__: Optional[Any] = drop_path_rate lowercase__: Dict = qkv_bias lowercase__: Dict = cls_token lowercase__: Any = qkv_projection_method lowercase__: List[str] = kernel_qkv lowercase__: Union[str, Any] = padding_kv lowercase__: Optional[int] = stride_kv lowercase__: int = padding_q lowercase__: Dict = stride_q lowercase__: Any = initializer_range lowercase__: Union[str, Any] = layer_norm_eps
2
1
"""simple docstring""" from sklearn.metrics import mean_squared_error import datasets __A = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" __A = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" __A = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCAmelCase (datasets.Metric ): """simple docstring""" def _snake_case ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def _snake_case ( self ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase="uniform_average" , _UpperCAmelCase=True ): lowercase__: Tuple = mean_squared_error( _UpperCAmelCase , _UpperCAmelCase , sample_weight=_UpperCAmelCase , multioutput=_UpperCAmelCase , squared=_UpperCAmelCase ) return {"mse": mse}
2
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = "rag" _UpperCAmelCase :List[Any] = True def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' ) lowercase__: Any = question_encoder_config.pop('''model_type''' ) lowercase__: Tuple = kwargs.pop('''generator''' ) lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: str = reduce_loss lowercase__: str = label_smoothing lowercase__: Dict = exclude_bos_score lowercase__: Any = do_marginalize lowercase__: Optional[int] = title_sep lowercase__: Any = doc_sep lowercase__: Any = n_docs lowercase__: List[Any] = max_combined_length lowercase__: int = dataset lowercase__: int = dataset_split lowercase__: str = index_name lowercase__: Dict = retrieval_vector_size lowercase__: Dict = retrieval_batch_size lowercase__: List[str] = passages_path lowercase__: str = index_path lowercase__: Optional[Any] = use_dummy_dataset lowercase__: str = output_retrieved lowercase__: List[str] = do_deduplication lowercase__: List[Any] = use_cache if self.forced_eos_token_id is None: lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase ) @classmethod def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = copy.deepcopy(self.__dict__ ) lowercase__: str = self.question_encoder.to_dict() lowercase__: str = self.generator.to_dict() lowercase__: str = self.__class__.model_type return output
2
1
"""simple docstring""" __A = 9.8_0665 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = g ) -> float: if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
2
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) __A = "hf-internal-testing/tiny-random-bert" __A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") __A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: Dict = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(os.path.isfile(_UpperCAmelCase ) ) # File is cached at the same place the second time. lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # Using a specific revision to test the full commit hash. lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase ) with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: int = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) ) lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: Union[str, Any] = mock.Mock() lowercase__: str = 500 lowercase__: Union[str, Any] = {} lowercase__: List[str] = HTTPError lowercase__: int = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head: lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) # This check we did call the fake head request mock_head.assert_called() def _snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) def _snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCAmelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' ) lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
2
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "ctrl" _UpperCAmelCase :int = ["past_key_values"] _UpperCAmelCase :Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ): lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[int] = n_positions lowercase__: Optional[int] = n_embd lowercase__: Any = n_layer lowercase__: Any = n_head lowercase__: int = dff lowercase__: Dict = resid_pdrop lowercase__: Any = embd_pdrop lowercase__: Any = layer_norm_epsilon lowercase__: Optional[int] = initializer_range lowercase__: Dict = use_cache super().__init__(**_UpperCAmelCase )
2
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "beit" def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Union[str, Any] = vocab_size lowercase__: List[Any] = hidden_size lowercase__: Optional[int] = num_hidden_layers lowercase__: Optional[int] = num_attention_heads lowercase__: int = intermediate_size lowercase__: List[str] = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: List[str] = initializer_range lowercase__: Optional[int] = layer_norm_eps lowercase__: int = image_size lowercase__: Tuple = patch_size lowercase__: int = num_channels lowercase__: Optional[Any] = use_mask_token lowercase__: List[Any] = use_absolute_position_embeddings lowercase__: Optional[int] = use_relative_position_bias lowercase__: Optional[int] = use_shared_relative_position_bias lowercase__: Optional[Any] = layer_scale_init_value lowercase__: Union[str, Any] = drop_path_rate lowercase__: Tuple = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__: Tuple = out_indices lowercase__: Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__: List[str] = use_auxiliary_head lowercase__: Optional[Any] = auxiliary_loss_weight lowercase__: str = auxiliary_channels lowercase__: List[str] = auxiliary_num_convs lowercase__: Tuple = auxiliary_concat_input lowercase__: Dict = semantic_loss_ignore_index class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _snake_case ( self ): return 1e-4
2
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"}, "tokenizer_file": { "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json" }, } __A = {"mobilebert-uncased": 5_1_2} __A = {} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = VOCAB_FILES_NAMES _UpperCAmelCase :int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Dict = PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Dict = MobileBertTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__: Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCAmelCase ) != tokenize_chinese_chars ): lowercase__: Optional[Any] = getattr(_UpperCAmelCase , normalizer_state.pop('''type''' ) ) lowercase__: Any = do_lower_case lowercase__: List[Any] = strip_accents lowercase__: List[Any] = tokenize_chinese_chars lowercase__: Tuple = normalizer_class(**_UpperCAmelCase ) lowercase__: Dict = do_lower_case def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=None ): lowercase__: Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Union[str, Any] = [self.sep_token_id] lowercase__: Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase )
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: lowercase__: int = '''''' for word_or_phrase in separated: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(__UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
2
1
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __A = logging.getLogger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase=-1 ): # in NER datasets, the last column is usually reserved for NER label lowercase__: Union[str, Any] = label_idx def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = mode.value lowercase__: Dict = os.path.join(_UpperCAmelCase , F"""{mode}.txt""" ) lowercase__: Tuple = 1 lowercase__: Tuple = [] with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: lowercase__: Any = [] lowercase__: Dict = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_UpperCAmelCase , labels=_UpperCAmelCase ) ) guid_index += 1 lowercase__: Optional[Any] = [] lowercase__: Optional[Any] = [] else: lowercase__: Any = line.split(''' ''' ) words.append(splits[0] ) if len(_UpperCAmelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_UpperCAmelCase , labels=_UpperCAmelCase ) ) return examples def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(_UpperCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__: Optional[Any] = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(_UpperCAmelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def _snake_case ( self , _UpperCAmelCase ): if path: with open(_UpperCAmelCase , '''r''' ) as f: lowercase__: Optional[int] = f.read().splitlines() if "O" not in labels: lowercase__: int = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def _snake_case ( self , _UpperCAmelCase ): if path: with open(_UpperCAmelCase , '''r''' ) as f: lowercase__: List[str] = f.read().splitlines() if "O" not in labels: lowercase__: Union[str, Any] = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = mode.value lowercase__: List[str] = os.path.join(_UpperCAmelCase , F"""{mode}.txt""" ) lowercase__: Optional[int] = 1 lowercase__: int = [] with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(_UpperCAmelCase ): lowercase__: str = [] lowercase__: Optional[int] = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_UpperCAmelCase , labels=_UpperCAmelCase ) ) guid_index += 1 return examples def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = 0 for sentence in parse_incr(_UpperCAmelCase ): lowercase__: Optional[Any] = preds_list[example_id] lowercase__: List[Any] = '''''' for token in sentence: out += F"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(_UpperCAmelCase ) example_id += 1 def _snake_case ( self , _UpperCAmelCase ): if path: with open(_UpperCAmelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
2
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline _UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self ): torch.manual_seed(0 ) lowercase__: Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowercase__: List[Any] = DDIMScheduler() torch.manual_seed(0 ) lowercase__: Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__: Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase ) lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__: int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ): lowercase__: int = torch.manual_seed(_UpperCAmelCase ) lowercase__: List[Any] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. '''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[str] = self.get_dummy_components() lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _snake_case ( self ): super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 ) def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = '''french fries''' lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase ) lowercase__: Optional[Any] = output.images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 ) lowercase__: List[str] = output.images lowercase__: List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: int = self.get_dummy_components() lowercase__: List[str] = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: Any = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[Any] = self.get_dummy_components() lowercase__: Any = PNDMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase ) lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _UpperCAmelCase=0 ): lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase ) lowercase__: int = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Any = '''stabilityai/stable-diffusion-2-base''' lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ 0.36_968_392, 0.27_025_372, 0.32_446_766, 0.28_379_387, 0.36_363_274, 0.30_733_347, 0.27_100_027, 0.27_054_125, 0.25_536_096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase ) lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: List[str] = self.get_inputs() lowercase__: Dict = pipe(**_UpperCAmelCase ).images lowercase__: Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _snake_case ( self ): lowercase__: int = 0 def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None: lowercase__: List[str] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase__: Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Any = latents[0, -3:, -3:, -1] lowercase__: List[Any] = np.array( [ 0.18_681_869, 0.33_907_816, 0.5_361_276, 0.14_432_865, -0.02_856_611, -0.73_941_123, 0.23_397_987, 0.47_322_682, -0.37_823_164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowercase__: Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Optional[Any] = latents[0, -3:, -3:, -1] lowercase__: Any = np.array( [ 0.18_539_645, 0.33_987_248, 0.5_378_559, 0.14_437_142, -0.02_455_261, -0.7_338_317, 0.23_990_755, 0.47_356_272, -0.3_786_505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowercase__: int = False lowercase__: str = '''stabilityai/stable-diffusion-2-base''' lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _snake_case ( self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base''' lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__: Any = self.get_inputs() lowercase__: List[str] = pipe(**_UpperCAmelCase ) lowercase__: Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
2
1
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A = 1_6 __A = 3_2 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase = 1_6 ) -> Union[str, Any]: lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowercase__: int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) lowercase__: List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__: int = datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__: Any = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__: Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__: List[Any] = 1_6 elif accelerator.mixed_precision != "no": lowercase__: Optional[Any] = 8 else: lowercase__: Optional[Any] = None return tokenizer.pad( __UpperCAmelCase , padding='''longest''' , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. lowercase__: Tuple = DataLoader( tokenized_datasets['''train'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) lowercase__: Tuple = DataLoader( tokenized_datasets['''validation'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCAmelCase ) == "1": lowercase__: int = 2 # Initialize accelerator lowercase__: str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__: List[str] = config['''lr'''] lowercase__: Optional[Any] = int(config['''num_epochs'''] ) lowercase__: Optional[Any] = int(config['''seed'''] ) lowercase__: Optional[int] = int(config['''batch_size'''] ) lowercase__: List[str] = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__UpperCAmelCase ) def inner_training_loop(__UpperCAmelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__: Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__: str = model.to(accelerator.device ) # Instantiate optimizer lowercase__: str = AdamW(params=model.parameters() , lr=__UpperCAmelCase ) lowercase__, lowercase__: Tuple = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase ) # Instantiate scheduler lowercase__: int = get_linear_schedule_with_warmup( optimizer=__UpperCAmelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__, lowercase__, lowercase__, lowercase__, lowercase__: Any = accelerator.prepare( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Now we train the model for epoch in range(__UpperCAmelCase ): model.train() for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowercase__: List[Any] = model(**__UpperCAmelCase ) lowercase__: Union[str, Any] = outputs.loss accelerator.backward(__UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__: Tuple = model(**__UpperCAmelCase ) lowercase__: int = outputs.logits.argmax(dim=-1 ) lowercase__, lowercase__: Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__UpperCAmelCase , references=__UpperCAmelCase , ) lowercase__: Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __UpperCAmelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def SCREAMING_SNAKE_CASE__ ( ) -> int: lowercase__: List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowercase__: int = parser.parse_args() lowercase__: str = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": main()
2
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Dict = DebertaVaTokenizer _UpperCAmelCase :Tuple = DebertaVaTokenizerFast _UpperCAmelCase :int = True _UpperCAmelCase :int = True def _snake_case ( self ): super().setUp() # We have a SentencePiece fixture for testing lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[str] = '''this is a test''' lowercase__: int = '''this is a test''' return input_text, output_text def _snake_case ( self ): lowercase__: Optional[int] = '''<pad>''' lowercase__: Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(_UpperCAmelCase ) , 30001 ) def _snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _snake_case ( self ): # fmt: off lowercase__: int = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass def _snake_case ( self ): # fmt: off lowercase__: Dict = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Any = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.get_tokenizer() lowercase__: List[Any] = self.get_rust_tokenizer() lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.get_rust_tokenizer() lowercase__: str = tokenizer.encode(_UpperCAmelCase ) lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = '''This is a test''' lowercase__: str = [13, 1, 4398, 25, 21, 1289] lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # fmt: off lowercase__: str = '''I was born in 92000, and this is falsé.''' lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase ) lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' ) lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' ) lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , ) @slow def _snake_case ( self ): # fmt: off lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
"""simple docstring""" import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=224 , _UpperCAmelCase=1000 , _UpperCAmelCase=[3, 3, 6, 4] , _UpperCAmelCase=[48, 56, 112, 220] , ): lowercase__: List[str] = parent lowercase__: List[str] = batch_size lowercase__: str = num_channels lowercase__: Optional[int] = is_training lowercase__: Optional[int] = use_labels lowercase__: Tuple = hidden_dropout_prob lowercase__: Optional[int] = attention_probs_dropout_prob lowercase__: Optional[Any] = num_labels lowercase__: List[Any] = image_size lowercase__: Union[str, Any] = layer_depths lowercase__: Optional[int] = embed_dims def _snake_case ( self ): lowercase__: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__: Optional[Any] = None if self.use_labels: lowercase__: List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowercase__: Dict = self.get_config() return config, pixel_values, labels def _snake_case ( self ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_UpperCAmelCase , layer_scale_init_value=1e-5 , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = SwiftFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = self.num_labels lowercase__: Any = SwiftFormerForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) lowercase__: List[Any] = SwiftFormerForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__: Any = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self ): ((lowercase__), (lowercase__), (lowercase__)): List[str] = self.prepare_config_and_inputs() lowercase__: Tuple = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Any = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _UpperCAmelCase :Any = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase :str = False _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :Dict = False _UpperCAmelCase :Tuple = False _UpperCAmelCase :Union[str, Any] = False def _snake_case ( self ): lowercase__: Dict = SwiftFormerModelTester(self ) lowercase__: int = ConfigTester( self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _snake_case ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' ) def _snake_case ( self ): pass def _snake_case ( self ): lowercase__, lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: Optional[Any] = model_class(_UpperCAmelCase ) lowercase__: List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def _snake_case ( self ): lowercase__, lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: Optional[Any] = model_class(_UpperCAmelCase ) lowercase__: List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__: Optional[int] = [*signature.parameters.keys()] lowercase__: Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def _snake_case ( self ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: List[str] = SwiftFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip(reason='''SwiftFormer does not output attentions''' ) def _snake_case ( self ): pass def _snake_case ( self ): def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): lowercase__: List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) lowercase__: List[Any] = outputs.hidden_states lowercase__: str = 8 self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(_UpperCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) lowercase__, lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: Dict = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__: Optional[Any] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): def _config_zero_init(_UpperCAmelCase ): lowercase__: str = copy.deepcopy(_UpperCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_UpperCAmelCase , _UpperCAmelCase , 1e-1_0 ) if isinstance(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ): lowercase__: Optional[Any] = _config_zero_init(getattr(_UpperCAmelCase , _UpperCAmelCase ) ) setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return configs_no_init lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__: Union[str, Any] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: lowercase__: Optional[int] = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _snake_case ( self ): pass def SCREAMING_SNAKE_CASE__ ( ) -> int: lowercase__: Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @cached_property def _snake_case ( self ): return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None @slow def _snake_case ( self ): lowercase__: List[str] = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_UpperCAmelCase ) lowercase__: Any = self.default_image_processor lowercase__: Any = prepare_img() lowercase__: int = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): lowercase__: Optional[int] = model(**_UpperCAmelCase ) # verify the logits lowercase__: Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) lowercase__: Union[str, Any] = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
2
"""simple docstring""" import unittest from transformers import DonutProcessor __A = "naver-clova-ix/donut-base" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__: Union[str, Any] = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__: str = self.processor.tokenajson(_UpperCAmelCase ) self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
2
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor __A = logging.get_logger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ImageGPTImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
2
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __A = logging.get_logger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
2
1
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: lowercase__: Dict = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(__UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__, lowercase__: Any = emb.weight.shape lowercase__: Any = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) lowercase__: List[Any] = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase="facebook/mbart-large-en-ro" , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Dict: lowercase__: Optional[int] = torch.load(__UpperCAmelCase , map_location='''cpu''' )['''model'''] remove_ignore_keys_(__UpperCAmelCase ) lowercase__: str = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase__: str = MBartConfig.from_pretrained(__UpperCAmelCase , vocab_size=__UpperCAmelCase ) if mbart_aa and finetuned: lowercase__: List[Any] = '''relu''' lowercase__: Union[str, Any] = state_dict['''decoder.embed_tokens.weight'''] lowercase__: Optional[Any] = MBartForConditionalGeneration(__UpperCAmelCase ) model.model.load_state_dict(__UpperCAmelCase ) if finetuned: lowercase__: str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") __A = parser.parse_args() __A = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
2
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = 2_5_6 class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = ["melgan"] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): super().__init__() # From MELGAN lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training. lowercase__: Union[str, Any] = 4.0 # Largest value for most examples lowercase__: Union[str, Any] = 128 self.register_modules( notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: int = output_range if clip: lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: str = input_range lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs # Scale to [0, 1]. lowercase__: Tuple = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = input_tokens > 0 lowercase__, lowercase__: str = self.notes_encoder( encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.continuous_encoder( encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = noise_time if not torch.is_tensor(_UpperCAmelCase ): lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0: lowercase__: str = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__: Union[str, Any] = self.decoder( encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase ) return logits @torch.no_grad() def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(_UpperCAmelCase )}.""" ) lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) for i, encoder_input_tokens in enumerate(_UpperCAmelCase ): if i == 0: lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. lowercase__: Union[str, Any] = ones lowercase__: str = self.scale_features( _UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase ) lowercase__: Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__: int = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__: List[Any] = self.decode( encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] ) lowercase__: Dict = mel[:1] lowercase__: List[Any] = mel.cpu().float().numpy() lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCAmelCase , _UpperCAmelCase ) logger.info('''Generated segment''' , _UpperCAmelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__: Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_UpperCAmelCase )
2
1
"""simple docstring""" import requests from bsa import BeautifulSoup def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "https://www.worldometers.info/coronavirus" ) -> dict: lowercase__: List[str] = BeautifulSoup(requests.get(__UpperCAmelCase ).text , '''html.parser''' ) lowercase__: List[Any] = soup.findAll('''h1''' ) lowercase__: int = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} ) keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} ) values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} ) return {key.text.strip(): value.text.strip() for key, value in zip(__UpperCAmelCase , __UpperCAmelCase )} if __name__ == "__main__": print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n") for key, value in world_covidaa_stats().items(): print(f'''{key}\n{value}\n''')
2
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __A = logging.get_logger(__name__) __A = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = "bloom" _UpperCAmelCase :List[str] = ["past_key_values"] _UpperCAmelCase :Optional[Any] = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: Any = vocab_size # Backward compatibility with n_embed kwarg lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase ) lowercase__: int = hidden_size if n_embed is None else n_embed lowercase__: int = n_layer lowercase__: int = n_head lowercase__: Optional[Any] = layer_norm_epsilon lowercase__: int = initializer_range lowercase__: List[Any] = use_cache lowercase__: str = pretraining_tp lowercase__: Tuple = apply_residual_connection_post_layernorm lowercase__: int = hidden_dropout lowercase__: Optional[Any] = attention_dropout lowercase__: int = bos_token_id lowercase__: Union[str, Any] = eos_token_id lowercase__: Any = slow_but_exact super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = version.parse("1.12" ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase ) lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: str = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head @property def _snake_case ( self ): return 1e-3 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Tuple = seqlen + 2 lowercase__: str = self._config.hidden_size // self.num_attention_heads lowercase__: Optional[int] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase__: Union[str, Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase__: str = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Tuple = common_inputs['''attention_mask'''] if self.use_past: lowercase__: int = ordered_inputs['''attention_mask'''].dtype lowercase__: List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = ["model.decoder.embed_positions.weights"] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]: if "emb" in name: lowercase__: Dict = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: lowercase__: Tuple = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: lowercase__: Tuple = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: lowercase__: Optional[int] = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: lowercase__: Any = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: lowercase__: Any = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: lowercase__: Dict = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: lowercase__: Optional[Any] = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: lowercase__: Dict = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: lowercase__: List[Any] = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: lowercase__: Union[str, Any] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple[Dict, Dict]: lowercase__: Dict = list(state_dict.keys() ) lowercase__: Union[str, Any] = {} for key in keys: lowercase__: List[Any] = state_dict.pop(__UpperCAmelCase ) lowercase__: List[Any] = rename_keys(__UpperCAmelCase ) if "in_proj_weight" in key: # split fused qkv proj lowercase__: Union[str, Any] = val[:hidden_size, :] lowercase__: List[str] = val[hidden_size : 2 * hidden_size, :] lowercase__: Dict = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowercase__: Optional[Any] = val else: lowercase__: Dict = val return state_dict, enc_dec_proj_state_dict def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values lowercase__: Dict = 1_0_2_4 lowercase__: str = 2_4 lowercase__: Optional[int] = 1_6 elif checkpoint == "medium": lowercase__: int = 1_5_3_6 lowercase__: List[Any] = 4_8 lowercase__: Optional[Any] = 2_4 elif checkpoint == "large": lowercase__: Optional[Any] = 2_0_4_8 lowercase__: Optional[Any] = 4_8 lowercase__: Union[str, Any] = 3_2 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) lowercase__: Optional[int] = MusicgenDecoderConfig( hidden_size=__UpperCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__UpperCAmelCase , num_attention_heads=__UpperCAmelCase , ) return config @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="cpu" ) -> Union[str, Any]: lowercase__: Dict = MusicGen.get_pretrained(__UpperCAmelCase , device=__UpperCAmelCase ) lowercase__: Tuple = decoder_config_from_checkpoint(__UpperCAmelCase ) lowercase__: Dict = fairseq_model.lm.state_dict() lowercase__, lowercase__: Tuple = rename_state_dict( __UpperCAmelCase , hidden_size=decoder_config.hidden_size ) lowercase__: Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' ) lowercase__: Any = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) lowercase__: Optional[int] = MusicgenForCausalLM(__UpperCAmelCase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowercase__, lowercase__: Tuple = decoder.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__UpperCAmelCase ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model lowercase__: int = MusicgenForConditionalGeneration(text_encoder=__UpperCAmelCase , audio_encoder=__UpperCAmelCase , decoder=__UpperCAmelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__UpperCAmelCase ) # check we can do a forward pass lowercase__: Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowercase__: int = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowercase__: Union[str, Any] = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits if logits.shape != (8, 1, 2_0_4_8): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor lowercase__: Any = AutoTokenizer.from_pretrained('''t5-base''' ) lowercase__: Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) lowercase__: Optional[Any] = MusicgenProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # set the appropriate bos/pad token ids lowercase__: int = 2_0_4_8 lowercase__: Tuple = 2_0_4_8 # set other default generation config params lowercase__: Any = int(3_0 * audio_encoder.config.frame_rate ) lowercase__: str = True lowercase__: Optional[int] = 3.0 if pytorch_dump_folder is not None: Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__UpperCAmelCase ) processor.save_pretrained(__UpperCAmelCase ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__UpperCAmelCase ) processor.push_to_hub(__UpperCAmelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __A = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
2
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): lowercase__: Dict = parent lowercase__: Optional[int] = batch_size lowercase__: List[str] = seq_length lowercase__: Optional[int] = is_training lowercase__: Dict = use_input_mask lowercase__: List[Any] = use_token_type_ids lowercase__: List[str] = use_labels lowercase__: Union[str, Any] = vocab_size lowercase__: str = hidden_size lowercase__: Any = embedding_size lowercase__: Any = num_hidden_layers lowercase__: Any = num_attention_heads lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: Optional[int] = max_position_embeddings lowercase__: List[Any] = type_vocab_size lowercase__: Tuple = type_sequence_label_size lowercase__: Optional[int] = initializer_range lowercase__: Dict = num_labels lowercase__: int = num_choices lowercase__: int = scope def _snake_case ( self ): lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: List[Any] = None if self.use_input_mask: lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[Any] = None if self.use_token_type_ids: lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: Optional[Any] = None lowercase__: Any = None lowercase__: str = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase__: Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ): return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = MobileBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: str = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: str = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = self.num_choices lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): lowercase__: Optional[int] = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ): Union[str, Any] = config_and_inputs lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Optional[Any] = True def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ): lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): lowercase__: Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) lowercase__: Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def _snake_case ( self ): lowercase__: int = MobileBertModelTester(self ) lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: return torch.tensor( __UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase , ) __A = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase ) lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): lowercase__: Tuple = model(_UpperCAmelCase )[0] lowercase__: Dict = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor( [ [ [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5], [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0], [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1], ] ] , device=_UpperCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
2
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=12 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=0 , _UpperCAmelCase=None , ): lowercase__: Tuple = parent lowercase__: Optional[Any] = batch_size lowercase__: Union[str, Any] = seq_length lowercase__: Optional[int] = is_training lowercase__: int = use_input_mask lowercase__: List[str] = use_labels lowercase__: Any = vocab_size lowercase__: Dict = hidden_size lowercase__: List[str] = projection_dim lowercase__: str = num_hidden_layers lowercase__: List[str] = num_attention_heads lowercase__: Union[str, Any] = intermediate_size lowercase__: Union[str, Any] = dropout lowercase__: Optional[int] = attention_dropout lowercase__: Optional[Any] = max_position_embeddings lowercase__: List[Any] = initializer_range lowercase__: Any = scope lowercase__: Optional[Any] = bos_token_id def _snake_case ( self ): lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: Optional[int] = None if self.use_input_mask: lowercase__: List[str] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: lowercase__: List[str] = input_mask.numpy() lowercase__, lowercase__: int = input_mask.shape lowercase__: List[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCAmelCase ): lowercase__: Any = 1 lowercase__: List[Any] = 0 lowercase__: Optional[int] = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCAmelCase ) def _snake_case ( self ): return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = TFBlipTextModel(config=_UpperCAmelCase ) lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , training=_UpperCAmelCase ) lowercase__: Tuple = model(_UpperCAmelCase , training=_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self ): lowercase__: Tuple = self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__: List[str] = config_and_inputs lowercase__: str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = (TFBlipTextModel,) if is_tf_available() else () _UpperCAmelCase :Optional[Any] = False _UpperCAmelCase :str = False _UpperCAmelCase :List[str] = False def _snake_case ( self ): lowercase__: int = BlipTextModelTester(self ) lowercase__: Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def _snake_case ( self ): pass def _snake_case ( self ): pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def _snake_case ( self ): pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def _snake_case ( self ): pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def _snake_case ( self ): pass @slow def _snake_case ( self ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: Dict = TFBlipTextModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCAmelCase )
2
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = "unispeech-sat" def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowercase__: Union[str, Any] = hidden_size lowercase__: Union[str, Any] = feat_extract_norm lowercase__: Any = feat_extract_activation lowercase__: List[Any] = list(_UpperCAmelCase ) lowercase__: Optional[int] = list(_UpperCAmelCase ) lowercase__: int = list(_UpperCAmelCase ) lowercase__: Any = conv_bias lowercase__: List[str] = num_conv_pos_embeddings lowercase__: List[str] = num_conv_pos_embedding_groups lowercase__: int = len(self.conv_dim ) lowercase__: Dict = num_hidden_layers lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: Optional[Any] = num_attention_heads lowercase__: Union[str, Any] = hidden_dropout lowercase__: List[Any] = attention_dropout lowercase__: str = activation_dropout lowercase__: Optional[Any] = feat_proj_dropout lowercase__: Optional[int] = final_dropout lowercase__: Any = layerdrop lowercase__: int = layer_norm_eps lowercase__: Any = initializer_range lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[Any] = num_clusters lowercase__: Dict = do_stable_layer_norm lowercase__: List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__: Dict = apply_spec_augment lowercase__: Union[str, Any] = mask_time_prob lowercase__: List[str] = mask_time_length lowercase__: Union[str, Any] = mask_time_min_masks lowercase__: str = mask_feature_prob lowercase__: Dict = mask_feature_length lowercase__: List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__: Tuple = num_codevectors_per_group lowercase__: Optional[Any] = num_codevector_groups lowercase__: int = contrastive_logits_temperature lowercase__: Any = feat_quantizer_dropout lowercase__: int = num_negatives lowercase__: Optional[Any] = codevector_dim lowercase__: int = proj_codevector_dim lowercase__: str = diversity_loss_weight # ctc loss lowercase__: int = ctc_loss_reduction lowercase__: Union[str, Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__: Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = list(_UpperCAmelCase ) lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
2
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) __A = parser.parse_args() __A = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError('''only integers accepted as input''' ) else: lowercase__: List[Any] = str(abs(__UpperCAmelCase ) ) lowercase__: Any = [list(__UpperCAmelCase ) for char in range(len(__UpperCAmelCase ) )] for index in range(len(__UpperCAmelCase ) ): num_transpositions[index].pop(__UpperCAmelCase ) return max( int(''''''.join(list(__UpperCAmelCase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__("doctest").testmod()
2
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
2
1
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :jnp.ndarray _UpperCAmelCase :jnp.ndarray class UpperCAmelCase (nn.Module ): """simple docstring""" _UpperCAmelCase :int _UpperCAmelCase :Tuple[int] = (16, 32, 96, 256) _UpperCAmelCase :jnp.dtype = jnp.floataa def _snake_case ( self ): lowercase__: List[str] = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowercase__: Dict = [] for i in range(len(self.block_out_channels ) - 1 ): lowercase__: Dict = self.block_out_channels[i] lowercase__: Tuple = self.block_out_channels[i + 1] lowercase__: Union[str, Any] = nn.Conv( _UpperCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_UpperCAmelCase ) lowercase__: Union[str, Any] = nn.Conv( _UpperCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_UpperCAmelCase ) lowercase__: Optional[Any] = blocks lowercase__: Tuple = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , _UpperCAmelCase ): lowercase__: List[str] = self.conv_in(_UpperCAmelCase ) lowercase__: Any = nn.silu(_UpperCAmelCase ) for block in self.blocks: lowercase__: int = block(_UpperCAmelCase ) lowercase__: str = nn.silu(_UpperCAmelCase ) lowercase__: int = self.conv_out(_UpperCAmelCase ) return embedding @flax_register_to_config class UpperCAmelCase (nn.Module ,_UpperCAmelCase ,_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = 32 _UpperCAmelCase :int = 4 _UpperCAmelCase :Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _UpperCAmelCase :Union[bool, Tuple[bool]] = False _UpperCAmelCase :Tuple[int] = (320, 640, 1280, 1280) _UpperCAmelCase :int = 2 _UpperCAmelCase :Union[int, Tuple[int]] = 8 _UpperCAmelCase :Optional[Union[int, Tuple[int]]] = None _UpperCAmelCase :int = 1280 _UpperCAmelCase :float = 0.0 _UpperCAmelCase :bool = False _UpperCAmelCase :jnp.dtype = jnp.floataa _UpperCAmelCase :bool = True _UpperCAmelCase :int = 0 _UpperCAmelCase :str = "rgb" _UpperCAmelCase :Tuple[int] = (16, 32, 96, 256) def _snake_case ( self , _UpperCAmelCase ): # init input tensors lowercase__: Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size) lowercase__: List[str] = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa ) lowercase__: Optional[Any] = jnp.ones((1,) , dtype=jnp.intaa ) lowercase__: str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) lowercase__: Optional[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8) lowercase__: int = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa ) lowercase__, lowercase__: Any = jax.random.split(_UpperCAmelCase ) lowercase__: str = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )["params"] def _snake_case ( self ): lowercase__: int = self.block_out_channels lowercase__: str = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowercase__: int = self.num_attention_heads or self.attention_head_dim # input lowercase__: Dict = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time lowercase__: str = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) lowercase__: int = FlaxTimestepEmbedding(_UpperCAmelCase , dtype=self.dtype ) lowercase__: Optional[Any] = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) lowercase__: int = self.only_cross_attention if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = (num_attention_heads,) * len(self.down_block_types ) # down lowercase__: str = [] lowercase__: Optional[int] = [] lowercase__: Tuple = block_out_channels[0] lowercase__: Union[str, Any] = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_UpperCAmelCase ) for i, down_block_type in enumerate(self.down_block_types ): lowercase__: Optional[int] = output_channel lowercase__: Any = block_out_channels[i] lowercase__: Optional[Any] = i == len(_UpperCAmelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowercase__: List[str] = FlaxCrossAttnDownBlockaD( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: lowercase__: str = FlaxDownBlockaD( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_UpperCAmelCase ) for _ in range(self.layers_per_block ): lowercase__: Optional[int] = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_UpperCAmelCase ) if not is_final_block: lowercase__: Optional[int] = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_UpperCAmelCase ) lowercase__: str = down_blocks lowercase__: Dict = controlnet_down_blocks # mid lowercase__: Optional[Any] = block_out_channels[-1] lowercase__: Dict = FlaxUNetMidBlockaDCrossAttn( in_channels=_UpperCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) lowercase__: Any = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1.0 , _UpperCAmelCase = True , _UpperCAmelCase = False , ): lowercase__: Any = self.controlnet_conditioning_channel_order if channel_order == "bgr": lowercase__: int = jnp.flip(_UpperCAmelCase , axis=1 ) # 1. time if not isinstance(_UpperCAmelCase , jnp.ndarray ): lowercase__: str = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: lowercase__: Optional[int] = timesteps.astype(dtype=jnp.floataa ) lowercase__: List[str] = jnp.expand_dims(_UpperCAmelCase , 0 ) lowercase__: List[str] = self.time_proj(_UpperCAmelCase ) lowercase__: List[Any] = self.time_embedding(_UpperCAmelCase ) # 2. pre-process lowercase__: int = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) ) lowercase__: int = self.conv_in(_UpperCAmelCase ) lowercase__: Any = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) ) lowercase__: Dict = self.controlnet_cond_embedding(_UpperCAmelCase ) sample += controlnet_cond # 3. down lowercase__: Dict = (sample,) for down_block in self.down_blocks: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__, lowercase__: List[str] = down_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train ) else: lowercase__, lowercase__: List[Any] = down_block(_UpperCAmelCase , _UpperCAmelCase , deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowercase__: int = self.mid_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train ) # 5. contronet blocks lowercase__: List[Any] = () for down_block_res_sample, controlnet_block in zip(_UpperCAmelCase , self.controlnet_down_blocks ): lowercase__: Tuple = controlnet_block(_UpperCAmelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) lowercase__: Tuple = controlnet_down_block_res_samples lowercase__: Optional[int] = self.controlnet_mid_block(_UpperCAmelCase ) # 6. scaling lowercase__: List[Any] = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=_UpperCAmelCase , mid_block_res_sample=_UpperCAmelCase )
2
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A = logging.get_logger(__name__) __A = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = "codegen" _UpperCAmelCase :Optional[int] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: int = vocab_size lowercase__: str = n_ctx lowercase__: List[Any] = n_positions lowercase__: Union[str, Any] = n_embd lowercase__: Optional[Any] = n_layer lowercase__: str = n_head lowercase__: List[Any] = n_inner lowercase__: Union[str, Any] = rotary_dim lowercase__: Optional[Any] = activation_function lowercase__: Union[str, Any] = resid_pdrop lowercase__: Optional[int] = embd_pdrop lowercase__: Optional[Any] = attn_pdrop lowercase__: Optional[int] = layer_norm_epsilon lowercase__: List[Any] = initializer_range lowercase__: Tuple = use_cache lowercase__: Any = bos_token_id lowercase__: Any = eos_token_id super().__init__( bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' ) lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Any = seqlen + 2 lowercase__: List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__: Optional[Any] = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Optional[Any] = common_inputs['''attention_mask'''] if self.use_past: lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype lowercase__: List[Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = (DPMSolverSinglestepScheduler,) _UpperCAmelCase :List[str] = (("num_inference_steps", 25),) def _snake_case ( self , **_UpperCAmelCase ): lowercase__: str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**_UpperCAmelCase ) return config def _snake_case ( self , _UpperCAmelCase=0 , **_UpperCAmelCase ): lowercase__: int = dict(self.forward_default_kwargs ) lowercase__: Optional[Any] = kwargs.pop('''num_inference_steps''' , _UpperCAmelCase ) lowercase__: Any = self.dummy_sample lowercase__: Optional[Any] = 0.1 * sample lowercase__: List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase__: List[Any] = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__: Union[str, Any] = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__: List[Any] = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals lowercase__: List[str] = dummy_past_residuals[: new_scheduler.config.solver_order] lowercase__, lowercase__: Dict = sample, sample for t in range(_UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ): lowercase__: List[Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample lowercase__: str = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _snake_case ( self ): pass def _snake_case ( self , _UpperCAmelCase=0 , **_UpperCAmelCase ): lowercase__: Union[str, Any] = dict(self.forward_default_kwargs ) lowercase__: List[Any] = kwargs.pop('''num_inference_steps''' , _UpperCAmelCase ) lowercase__: List[str] = self.dummy_sample lowercase__: str = 0.1 * sample lowercase__: Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase__: int = self.get_scheduler_config() lowercase__: Dict = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowercase__: Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) lowercase__: Tuple = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowercase__: Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order] lowercase__: List[str] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample lowercase__: str = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _snake_case ( self , _UpperCAmelCase=None , **_UpperCAmelCase ): if scheduler is None: lowercase__: List[Any] = self.scheduler_classes[0] lowercase__: int = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__: Dict = scheduler_class(**_UpperCAmelCase ) lowercase__: Union[str, Any] = self.scheduler_classes[0] lowercase__: Optional[Any] = self.get_scheduler_config(**_UpperCAmelCase ) lowercase__: Union[str, Any] = scheduler_class(**_UpperCAmelCase ) lowercase__: Tuple = 10 lowercase__: Any = self.dummy_model() lowercase__: str = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__: List[str] = model(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[str] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def _snake_case ( self ): lowercase__: Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) lowercase__: Any = 50 lowercase__: Dict = self.dummy_model() lowercase__: Any = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): lowercase__: str = model(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample lowercase__: List[str] = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_574 ) < 1e-3 def _snake_case ( self ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def _snake_case ( self ): # make sure that iterating over schedulers with same config names gives same results # for defaults lowercase__: Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) lowercase__: Union[str, Any] = self.full_loop(scheduler=_UpperCAmelCase ) lowercase__: Dict = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_791 ) < 1e-3 lowercase__: List[Any] = DEISMultistepScheduler.from_config(scheduler.config ) lowercase__: str = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowercase__: Tuple = UniPCMultistepScheduler.from_config(scheduler.config ) lowercase__: List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowercase__: Union[str, Any] = self.full_loop(scheduler=_UpperCAmelCase ) lowercase__: Dict = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_791 ) < 1e-3 def _snake_case ( self ): self.check_over_configs(thresholding=_UpperCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , algorithm_type='''dpmsolver++''' , solver_order=_UpperCAmelCase , solver_type=_UpperCAmelCase , ) def _snake_case ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def _snake_case ( self ): for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_UpperCAmelCase , solver_type=_UpperCAmelCase , prediction_type=_UpperCAmelCase , algorithm_type=_UpperCAmelCase , ) lowercase__: int = self.full_loop( solver_order=_UpperCAmelCase , solver_type=_UpperCAmelCase , prediction_type=_UpperCAmelCase , algorithm_type=_UpperCAmelCase , ) assert not torch.isnan(_UpperCAmelCase ).any(), "Samples have nan numbers" def _snake_case ( self ): self.check_over_configs(lower_order_final=_UpperCAmelCase ) self.check_over_configs(lower_order_final=_UpperCAmelCase ) def _snake_case ( self ): self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def _snake_case ( self ): self.check_over_configs(variance_type=_UpperCAmelCase ) self.check_over_configs(variance_type='''learned_range''' ) def _snake_case ( self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_UpperCAmelCase , time_step=0 ) def _snake_case ( self ): lowercase__: Optional[int] = self.full_loop() lowercase__: Tuple = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_791 ) < 1e-3 def _snake_case ( self ): lowercase__: Union[str, Any] = self.full_loop(use_karras_sigmas=_UpperCAmelCase ) lowercase__: List[Any] = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_248 ) < 1e-3 def _snake_case ( self ): lowercase__: Tuple = self.full_loop(prediction_type='''v_prediction''' ) lowercase__: Tuple = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 0.1_453 ) < 1e-3 def _snake_case ( self ): lowercase__: List[str] = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_UpperCAmelCase ) lowercase__: Any = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_mean.item() - 0.0_649 ) < 1e-3 def _snake_case ( self ): lowercase__: Union[str, Any] = self.scheduler_classes[0] lowercase__: Optional[int] = self.get_scheduler_config(thresholding=_UpperCAmelCase , dynamic_thresholding_ratio=0 ) lowercase__: Dict = scheduler_class(**_UpperCAmelCase ) lowercase__: Any = 10 lowercase__: str = self.dummy_model() lowercase__: Any = self.dummy_sample_deter.half() scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase__: Any = model(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample assert sample.dtype == torch.floataa
2
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = field( metadata={"help": "The output directory where the model will be written."} ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } ,) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: lowercase__: Dict = HfArgumentParser((ModelArguments,) ) ((lowercase__), ): List[str] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowercase__: Tuple = True lowercase__: int = True lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowercase__: int = decoder_config.decoder_start_token_id lowercase__: Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: lowercase__: Tuple = decoder_config.bos_token_id if pad_token_id is None: lowercase__: Optional[int] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowercase__: Optional[Any] = decoder_config.eos_token_id lowercase__: Tuple = decoder_start_token_id lowercase__: Dict = pad_token_id lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
2
1
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> bool: lowercase__: int = int(number**0.5 ) return number == sq * sq def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> tuple[int, int]: lowercase__: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den lowercase__: int = x_den * y_den * z_den lowercase__: int = gcd(__UpperCAmelCase , __UpperCAmelCase ) top //= hcf bottom //= hcf return top, bottom def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 3_5 ) -> int: lowercase__: set = set() lowercase__: int lowercase__: Fraction = Fraction(0 ) lowercase__: tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 lowercase__: Union[str, Any] = x_num * y_den + x_den * y_num lowercase__: str = x_den * y_den lowercase__: Dict = gcd(__UpperCAmelCase , __UpperCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__: List[str] = add_three( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) unique_s.add(__UpperCAmelCase ) # n=2 lowercase__: Union[str, Any] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) lowercase__: Optional[int] = x_den * x_den * y_den * y_den if is_sq(__UpperCAmelCase ) and is_sq(__UpperCAmelCase ): lowercase__: List[Any] = int(sqrt(__UpperCAmelCase ) ) lowercase__: int = int(sqrt(__UpperCAmelCase ) ) lowercase__: int = gcd(__UpperCAmelCase , __UpperCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__: Optional[int] = add_three( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) unique_s.add(__UpperCAmelCase ) # n=-1 lowercase__: Optional[Any] = x_num * y_num lowercase__: Any = x_den * y_num + x_num * y_den lowercase__: Tuple = gcd(__UpperCAmelCase , __UpperCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__: int = add_three( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) unique_s.add(__UpperCAmelCase ) # n=2 lowercase__: List[str] = x_num * x_num * y_num * y_num lowercase__: Union[str, Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(__UpperCAmelCase ) and is_sq(__UpperCAmelCase ): lowercase__: Union[str, Any] = int(sqrt(__UpperCAmelCase ) ) lowercase__: Tuple = int(sqrt(__UpperCAmelCase ) ) lowercase__: Tuple = gcd(__UpperCAmelCase , __UpperCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowercase__: List[Any] = add_three( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) unique_s.add(__UpperCAmelCase ) for num, den in unique_s: total += Fraction(__UpperCAmelCase , __UpperCAmelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "ctrl" _UpperCAmelCase :int = ["past_key_values"] _UpperCAmelCase :Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ): lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[int] = n_positions lowercase__: Optional[int] = n_embd lowercase__: Any = n_layer lowercase__: Any = n_head lowercase__: int = dff lowercase__: Dict = resid_pdrop lowercase__: Any = embd_pdrop lowercase__: Any = layer_norm_epsilon lowercase__: Optional[int] = initializer_range lowercase__: Dict = use_cache super().__init__(**_UpperCAmelCase )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict: lowercase__: Dict = 0 lowercase__: int = len(__UpperCAmelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCAmelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]: if len(__UpperCAmelCase ) <= 1: return arr, 0 lowercase__: str = len(__UpperCAmelCase ) // 2 lowercase__: List[Any] = arr[0:mid] lowercase__: str = arr[mid:] lowercase__, lowercase__: Dict = count_inversions_recursive(__UpperCAmelCase ) lowercase__, lowercase__: Dict = count_inversions_recursive(__UpperCAmelCase ) lowercase__, lowercase__: Union[str, Any] = _count_cross_inversions(__UpperCAmelCase , __UpperCAmelCase ) lowercase__: Dict = inversion_p + inversions_q + cross_inversions return c, num_inversions def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: lowercase__: Any = [] lowercase__: int = 0 while i < len(__UpperCAmelCase ) and j < len(__UpperCAmelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCAmelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCAmelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def SCREAMING_SNAKE_CASE__ ( ) -> int: lowercase__: Optional[Any] = [1_0, 2, 1, 5, 5, 2, 1_1] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) lowercase__: Dict = count_inversions_bf(__UpperCAmelCase ) lowercase__, lowercase__: Dict = count_inversions_recursive(__UpperCAmelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print('''number of inversions = ''' , __UpperCAmelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() lowercase__: List[Any] = count_inversions_bf(__UpperCAmelCase ) lowercase__, lowercase__: List[Any] = count_inversions_recursive(__UpperCAmelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , __UpperCAmelCase ) # an empty list should also have zero inversions lowercase__: Optional[Any] = [] lowercase__: str = count_inversions_bf(__UpperCAmelCase ) lowercase__, lowercase__: Dict = count_inversions_recursive(__UpperCAmelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , __UpperCAmelCase ) if __name__ == "__main__": main()
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int: lowercase__: str = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
2
1
"""simple docstring""" import os from collections.abc import Iterator def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "." ) -> Iterator[str]: for dir_path, dir_names, filenames in os.walk(__UpperCAmelCase ): lowercase__: Optional[Any] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._'''] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(__UpperCAmelCase )[1] in (".py", ".ipynb"): yield os.path.join(__UpperCAmelCase , __UpperCAmelCase ).lstrip('''./''' ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]: return F"""{i * ' '}*""" if i else "\n##" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: lowercase__: List[Any] = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(__UpperCAmelCase ) or old_parts[i] != new_part) and new_part: print(F"""{md_prefix(__UpperCAmelCase )} {new_part.replace('_' , ' ' ).title()}""" ) return new_path def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "." ) -> None: lowercase__: Any = '''''' for filepath in sorted(good_file_paths(__UpperCAmelCase ) ): lowercase__, lowercase__: Optional[Any] = os.path.split(__UpperCAmelCase ) if filepath != old_path: lowercase__: str = print_path(__UpperCAmelCase , __UpperCAmelCase ) lowercase__: str = (filepath.count(os.sep ) + 1) if filepath else 0 lowercase__: Optional[int] = F"""{filepath}/{filename}""".replace(''' ''' , '''%20''' ) lowercase__: List[Any] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0] print(F"""{md_prefix(__UpperCAmelCase )} [{filename}]({url})""" ) if __name__ == "__main__": print_directory_md(".")
2
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ): lowercase__: int = bp_numa lowercase__: Union[str, Any] = bp_numa lowercase__: List[str] = bp_numa lowercase__: str = conva_get[:2] lowercase__: Union[str, Any] = conva_get[2] lowercase__: Any = size_pa lowercase__: Optional[Any] = rate_w lowercase__: Tuple = rate_t lowercase__: List[str] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , _UpperCAmelCase ): # save model dict with pickle lowercase__: int = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_UpperCAmelCase , '''wb''' ) as f: pickle.dump(_UpperCAmelCase , _UpperCAmelCase ) print(F"""Model saved: {save_path}""" ) @classmethod def _snake_case ( cls , _UpperCAmelCase ): # read saved model with open(_UpperCAmelCase , '''rb''' ) as f: lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301 lowercase__: Tuple = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) lowercase__: Any = model_dic.get('''size_pooling1''' ) lowercase__: int = model_dic.get('''num_bp1''' ) lowercase__: Optional[int] = model_dic.get('''num_bp2''' ) lowercase__: str = model_dic.get('''num_bp3''' ) lowercase__: Any = model_dic.get('''rate_weight''' ) lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' ) # create model instance lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # modify model parameter lowercase__: Dict = model_dic.get('''w_conv1''' ) lowercase__: Dict = model_dic.get('''wkj''' ) lowercase__: str = model_dic.get('''vji''' ) lowercase__: List[Any] = model_dic.get('''thre_conv1''' ) lowercase__: Optional[int] = model_dic.get('''thre_bp2''' ) lowercase__: Tuple = model_dic.get('''thre_bp3''' ) return conv_ins def _snake_case ( self , _UpperCAmelCase ): return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , _UpperCAmelCase ): return round(_UpperCAmelCase , 3 ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # convolution process lowercase__: Any = convs[0] lowercase__: Tuple = convs[1] lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0] # get the data slice of original image data, data_focus lowercase__: List[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): lowercase__: Tuple = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_UpperCAmelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__: Optional[int] = [] lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_UpperCAmelCase ): lowercase__: str = [] for i_focus in range(len(_UpperCAmelCase ) ): lowercase__: Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape( _UpperCAmelCase , _UpperCAmelCase ) data_featuremap.append(_UpperCAmelCase ) # expanding the data slice to One dimenssion lowercase__: Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) ) lowercase__: Any = np.asarray(_UpperCAmelCase ) return focus_list, data_featuremap def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ): # pooling process lowercase__: List[Any] = len(featuremaps[0] ) lowercase__: Any = int(size_map / size_pooling ) lowercase__: List[Any] = [] for i_map in range(len(_UpperCAmelCase ) ): lowercase__: Any = featuremaps[i_map] lowercase__: Tuple = [] for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_UpperCAmelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ) featuremap_pooled.append(_UpperCAmelCase ) return featuremap_pooled def _snake_case ( self , _UpperCAmelCase ): # expanding three dimension data to one dimension list lowercase__: Optional[Any] = [] for i in range(len(_UpperCAmelCase ) ): lowercase__: Any = np.shape(data[i] ) lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) lowercase__: List[str] = data_listed.getA().tolist()[0] data_expanded.extend(_UpperCAmelCase ) lowercase__: List[str] = np.asarray(_UpperCAmelCase ) return data_expanded def _snake_case ( self , _UpperCAmelCase ): # expanding matrix to one dimension list lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase ) lowercase__: List[str] = np.shape(_UpperCAmelCase ) lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = [] lowercase__: List[str] = 0 for i_map in range(_UpperCAmelCase ): lowercase__: Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = pd_pool[ i_pool ] lowercase__: List[Any] = i_pool + 1 lowercase__: str = np.multiply( _UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_UpperCAmelCase ) return pd_all def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) ) print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) ) lowercase__: Tuple = 0 lowercase__: Tuple = [] lowercase__: Optional[int] = 10000 while rp < n_repeat and mse >= error_accuracy: lowercase__: Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(_UpperCAmelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__: List[Any] = np.asmatrix(datas_train[p] ) lowercase__: Optional[int] = np.asarray(datas_teach[p] ) lowercase__, lowercase__: List[str] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: int = np.shape(_UpperCAmelCase ) lowercase__: Optional[Any] = self._expand(_UpperCAmelCase ) lowercase__: Any = data_bp_input lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa lowercase__: str = self.sig(_UpperCAmelCase ) lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa lowercase__: Dict = self.sig(_UpperCAmelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__: str = np.multiply( (data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: str = np.multiply( np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji ) lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__: List[str] = pd_conva_pooled.T.getA().tolist() lowercase__: Optional[Any] = self._calculate_gradient_from_pool( _UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__: str = self._expand_mat(pd_conva_all[k_conv] ) lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__: List[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__: str = rp + 1 lowercase__: Optional[Any] = error_count / patterns all_mse.append(_UpperCAmelCase ) def draw_error(): lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_UpperCAmelCase , '''+-''' ) plt.plot(_UpperCAmelCase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_UpperCAmelCase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def _snake_case ( self , _UpperCAmelCase ): # model predict lowercase__: Union[str, Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) ) for p in range(len(_UpperCAmelCase ) ): lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] ) lowercase__, lowercase__: Any = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: str = self._expand(_UpperCAmelCase ) lowercase__: List[Any] = data_bp_input lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out] return np.asarray(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # return the data of image after convoluting process so we can check it out lowercase__: int = np.asmatrix(_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
2
1
"""simple docstring""" from __future__ import annotations from statistics import mean def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list[int]: lowercase__: str = [0] * no_of_processes lowercase__: Dict = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__UpperCAmelCase ): lowercase__: Any = burst_time[i] lowercase__: list[int] = [] lowercase__: Dict = 0 lowercase__: Optional[Any] = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: lowercase__: Dict = [] lowercase__: Any = -1 for i in range(__UpperCAmelCase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: lowercase__: Optional[int] = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: lowercase__: Tuple = i total_time += burst_time[target_process] completed += 1 lowercase__: Tuple = 0 lowercase__: List[str] = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list[int]: lowercase__: List[Any] = [0] * no_of_processes for i in range(__UpperCAmelCase ): lowercase__: str = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("[TEST CASE 01]") __A = 4 __A = [2, 5, 3, 7] __A = [0, 0, 0, 0] __A = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __A = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time") for i, process_id in enumerate(list(range(1, 5))): print( f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t''' f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}''' ) print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''') print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
2
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = CTRLTokenizer _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Optional[int] = '''adapt react readapt apt''' return input_text, output_text def _snake_case ( self ): lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = tokens + [tokenizer.unk_token] lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
2
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger __A = "<<<<<<< This should probably be modified because it mentions: " __A = "=======\n>>>>>>>\n" __A = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] __A = [ # (pattern, replacement) # Order is important here for some replacements (R"tfds\.core", R"datasets"), (R"tf\.io\.gfile\.GFile", R"open"), (R"tf\.([\w\d]+)", R"datasets.Value('\1')"), (R"tfds\.features\.Text\(\)", R"datasets.Value('string')"), (R"tfds\.features\.Text\(", R"datasets.Value('string'),"), (R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("), (R"tfds\.features\.FeaturesDict\(", R"dict("), (R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (R"tfds\.", R"datasets."), (R"dl_manager\.manual_dir", R"self.config.data_dir"), (R"self\.builder_config", R"self.config"), ] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: return ConvertCommand(args.tfds_path , args.datasets_directory ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" @staticmethod def _snake_case ( _UpperCAmelCase ): lowercase__: int = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ): lowercase__: List[str] = get_logger('''datasets-cli/converting''' ) lowercase__: Optional[Any] = tfds_path lowercase__: Dict = datasets_directory def _snake_case ( self ): if os.path.isdir(self._tfds_path ): lowercase__: Optional[Any] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__: Optional[int] = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' ) lowercase__: int = os.path.abspath(self._datasets_directory ) self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) lowercase__: Tuple = [] lowercase__: Dict = [] lowercase__: Any = {} if os.path.isdir(self._tfds_path ): lowercase__: Dict = os.listdir(_UpperCAmelCase ) else: lowercase__: Dict = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"""Looking at file {f_name}""" ) lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: lowercase__: Tuple = f.readlines() lowercase__: Optional[Any] = [] lowercase__: Dict = False lowercase__: List[str] = False lowercase__: List[Any] = [] for line in lines: lowercase__: List[str] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__: Optional[int] = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here lowercase__: Dict = '''''' continue elif "from absl import logging" in out_line: lowercase__: Tuple = '''from datasets import logging\n''' elif "getLogger" in out_line: lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__: Any = True lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' ) out_lines.append(_UpperCAmelCase ) out_lines.append(_UpperCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) lowercase__: List[str] = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__: Optional[Any] = True out_lines.append(_UpperCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__: Dict = f_name.replace('''.py''' , '''''' ) lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) self._logger.info(F"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_UpperCAmelCase ) if needs_manual_update: with_manual_update.append(_UpperCAmelCase ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.writelines(_UpperCAmelCase ) self._logger.info(F"""Converted in {output_file}""" ) for utils_file in utils_files: try: lowercase__: str = os.path.basename(_UpperCAmelCase ) lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(_UpperCAmelCase , _UpperCAmelCase ) except KeyError: self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
2
1
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", }, "tokenizer_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json", }, } # TODO(PVP) - this should be removed in Transformers v5 __A = { "t5-small": 5_1_2, "t5-base": 5_1_2, "t5-large": 5_1_2, "t5-3b": 5_1_2, "t5-11b": 5_1_2, } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = VOCAB_FILES_NAMES _UpperCAmelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Tuple = ["input_ids", "attention_mask"] _UpperCAmelCase :Any = TaTokenizer _UpperCAmelCase :List[int] = [] def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase=100 , _UpperCAmelCase=None , **_UpperCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: lowercase__: List[str] = [F"""<extra_id_{i}>""" for i in range(_UpperCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__: List[Any] = len(set(filter(lambda _UpperCAmelCase : bool('''extra_id_''' in str(_UpperCAmelCase ) ) , _UpperCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , extra_ids=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__: Tuple = vocab_file lowercase__: int = False if not self.vocab_file else True lowercase__: Optional[int] = extra_ids @staticmethod def _snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__: List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCAmelCase , ) return max_model_length def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__: str = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) logger.info(F"""Copy vocab file to {out_vocab_file}""" ) return (out_vocab_file,) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: List[str] = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__: List[Any] = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _snake_case ( self ): return list( set(filter(lambda _UpperCAmelCase : bool(re.search(r'''<extra_id_\d+>''' , _UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _snake_case ( self ): return [self.convert_tokens_to_ids(_UpperCAmelCase ) for token in self.get_sentinel_tokens()]
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "cvt" def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Dict = num_channels lowercase__: str = patch_sizes lowercase__: Optional[Any] = patch_stride lowercase__: List[str] = patch_padding lowercase__: Optional[Any] = embed_dim lowercase__: Optional[int] = num_heads lowercase__: Any = depth lowercase__: str = mlp_ratio lowercase__: Any = attention_drop_rate lowercase__: Any = drop_rate lowercase__: Optional[Any] = drop_path_rate lowercase__: Dict = qkv_bias lowercase__: Dict = cls_token lowercase__: Any = qkv_projection_method lowercase__: List[str] = kernel_qkv lowercase__: Union[str, Any] = padding_kv lowercase__: Optional[int] = stride_kv lowercase__: int = padding_q lowercase__: Dict = stride_q lowercase__: Any = initializer_range lowercase__: Union[str, Any] = layer_norm_eps
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: _enforce_args(__UpperCAmelCase , __UpperCAmelCase ) if n == 0: return 0 lowercase__: int = float('''-inf''' ) for i in range(1 , n + 1 ): lowercase__: Optional[int] = max( __UpperCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , __UpperCAmelCase ) ) return max_revue def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _enforce_args(__UpperCAmelCase , __UpperCAmelCase ) lowercase__: Optional[Any] = [float('''-inf''' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: lowercase__: int = float('''-inf''' ) for i in range(1 , n + 1 ): lowercase__: Union[str, Any] = max( __UpperCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __UpperCAmelCase , __UpperCAmelCase ) , ) lowercase__: Any = max_revenue return max_rev[n] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: _enforce_args(__UpperCAmelCase , __UpperCAmelCase ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. lowercase__: int = [float('''-inf''' ) for _ in range(n + 1 )] lowercase__: Union[str, Any] = 0 for i in range(1 , n + 1 ): lowercase__: Union[str, Any] = max_rev[i] for j in range(1 , i + 1 ): lowercase__: List[Any] = max(__UpperCAmelCase , prices[j - 1] + max_rev[i - j] ) lowercase__: Optional[int] = max_revenue_i return max_rev[n] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if n < 0: lowercase__: List[Any] = F"""n must be greater than or equal to 0. Got n = {n}""" raise ValueError(__UpperCAmelCase ) if n > len(__UpperCAmelCase ): lowercase__: str = ( '''Each integral piece of rod must have a corresponding price. ''' F"""Got n = {n} but length of prices = {len(__UpperCAmelCase )}""" ) raise ValueError(__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: lowercase__: List[str] = [6, 1_0, 1_2, 1_5, 2_0, 2_3] lowercase__: Optional[int] = len(__UpperCAmelCase ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. lowercase__: Tuple = 3_6 lowercase__: List[str] = top_down_cut_rod(__UpperCAmelCase , __UpperCAmelCase ) lowercase__: Union[str, Any] = bottom_up_cut_rod(__UpperCAmelCase , __UpperCAmelCase ) lowercase__: Optional[int] = naive_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
2
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = "rag" _UpperCAmelCase :List[Any] = True def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' ) lowercase__: Any = question_encoder_config.pop('''model_type''' ) lowercase__: Tuple = kwargs.pop('''generator''' ) lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: str = reduce_loss lowercase__: str = label_smoothing lowercase__: Dict = exclude_bos_score lowercase__: Any = do_marginalize lowercase__: Optional[int] = title_sep lowercase__: Any = doc_sep lowercase__: Any = n_docs lowercase__: List[Any] = max_combined_length lowercase__: int = dataset lowercase__: int = dataset_split lowercase__: str = index_name lowercase__: Dict = retrieval_vector_size lowercase__: Dict = retrieval_batch_size lowercase__: List[str] = passages_path lowercase__: str = index_path lowercase__: Optional[Any] = use_dummy_dataset lowercase__: str = output_retrieved lowercase__: List[str] = do_deduplication lowercase__: List[Any] = use_cache if self.forced_eos_token_id is None: lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase ) @classmethod def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = copy.deepcopy(self.__dict__ ) lowercase__: str = self.question_encoder.to_dict() lowercase__: str = self.generator.to_dict() lowercase__: str = self.__class__.model_type return output
2
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __A = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class UpperCAmelCase (unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _UpperCAmelCase :Union[str, Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _UpperCAmelCase :Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _UpperCAmelCase :Optional[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[int] = ZeroShotClassificationPipeline( model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , candidate_labels=['''polics''', '''health'''] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' ) self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase )]} ) # No kwarg lowercase__: int = classifier('''Who are you voting for in 2020?''' , ['''politics'''] ) self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase )]} ) lowercase__: Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] ) self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase )]} ) lowercase__: List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' ) self.assertEqual( _UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) lowercase__: Optional[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] ) self.assertEqual( _UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) lowercase__: Tuple = classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' ) self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase )]} ) # https://github.com/huggingface/transformers/issues/13846 lowercase__: Union[str, Any] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] ) self.assertEqual( _UpperCAmelCase , [ {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]} for i in range(1 ) ] , ) lowercase__: Any = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] ) self.assertEqual( _UpperCAmelCase , [ {'''sequence''': ANY(_UpperCAmelCase ), '''labels''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], '''scores''': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]} for i in range(2 ) ] , ) with self.assertRaises(_UpperCAmelCase ): classifier('''''' , candidate_labels='''politics''' ) with self.assertRaises(_UpperCAmelCase ): classifier(_UpperCAmelCase , candidate_labels='''politics''' ) with self.assertRaises(_UpperCAmelCase ): classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' ) with self.assertRaises(_UpperCAmelCase ): classifier('''Who are you voting for in 2020?''' , candidate_labels=_UpperCAmelCase ) with self.assertRaises(_UpperCAmelCase ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , ) with self.assertRaises(_UpperCAmelCase ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_UpperCAmelCase , ) self.run_entailment_id(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[Any] = zero_shot_classifier.model.config lowercase__: Optional[Any] = config.labelaid lowercase__: Dict = zero_shot_classifier.entailment_id lowercase__: Optional[int] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) lowercase__: Union[str, Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) lowercase__: Union[str, Any] = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) lowercase__: Optional[int] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) lowercase__: List[str] = original_labelaid self.assertEqual(_UpperCAmelCase , zero_shot_classifier.entailment_id ) @require_torch def _snake_case ( self ): lowercase__: Tuple = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( '''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] ) @require_torch def _snake_case ( self ): lowercase__: Tuple = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) lowercase__: List[Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.333, 0.333, 0.333], } , ) @require_tf def _snake_case ( self ): lowercase__: List[Any] = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , ) lowercase__: List[Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.333, 0.333, 0.333], } , ) @slow @require_torch def _snake_case ( self ): lowercase__: List[Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' ) lowercase__: Optional[int] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.976, 0.015, 0.009], } , ) lowercase__: Optional[int] = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_UpperCAmelCase , ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.817, 0.713, 0.018, 0.018], } , ) @slow @require_tf def _snake_case ( self ): lowercase__: Union[str, Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' ) lowercase__: List[Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.976, 0.015, 0.009], } , ) lowercase__: Tuple = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_UpperCAmelCase , ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.817, 0.713, 0.018, 0.018], } , )
2
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) __A = "hf-internal-testing/tiny-random-bert" __A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") __A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: Dict = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(os.path.isfile(_UpperCAmelCase ) ) # File is cached at the same place the second time. lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # Using a specific revision to test the full commit hash. lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase ) with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: int = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) ) lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: Union[str, Any] = mock.Mock() lowercase__: str = 500 lowercase__: Union[str, Any] = {} lowercase__: List[str] = HTTPError lowercase__: int = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head: lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) # This check we did call the fake head request mock_head.assert_called() def _snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) def _snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCAmelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' ) lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
2
1
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights lowercase__: List[str] = FlaxDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_UpperCAmelCase , cache_dir=_UpperCAmelCase ) lowercase__: List[str] = [t[-1] for t in os.walk(os.path.join(_UpperCAmelCase , os.listdir(_UpperCAmelCase )[0] , '''snapshots''' ) )] lowercase__: int = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('''.bin''' ) for f in files ) @slow @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__, lowercase__: Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_UpperCAmelCase ) lowercase__: List[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: int = jax.random.PRNGKey(0 ) lowercase__: Tuple = 4 lowercase__: List[Any] = jax.device_count() lowercase__: Optional[Any] = num_samples * [prompt] lowercase__: Dict = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: List[str] = replicate(_UpperCAmelCase ) lowercase__: List[str] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = shard(_UpperCAmelCase ) lowercase__: List[Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3 assert np.abs(np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1 lowercase__: List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_UpperCAmelCase ) == num_samples def _snake_case ( self ): lowercase__, lowercase__: Any = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=_UpperCAmelCase ) lowercase__: Optional[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: List[Any] = jax.random.PRNGKey(0 ) lowercase__: int = 50 lowercase__: str = jax.device_count() lowercase__: int = num_samples * [prompt] lowercase__: Any = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: int = replicate(_UpperCAmelCase ) lowercase__: Dict = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = shard(_UpperCAmelCase ) lowercase__: Tuple = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1 def _snake_case ( self ): lowercase__, lowercase__: List[str] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase ) lowercase__: Union[str, Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: List[Any] = jax.random.PRNGKey(0 ) lowercase__: Optional[Any] = 50 lowercase__: Optional[int] = jax.device_count() lowercase__: List[str] = num_samples * [prompt] lowercase__: Any = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: Optional[int] = replicate(_UpperCAmelCase ) lowercase__: Any = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[str] = shard(_UpperCAmelCase ) lowercase__: str = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1 def _snake_case ( self ): lowercase__, lowercase__: List[str] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa ) lowercase__: List[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: List[str] = jax.random.PRNGKey(0 ) lowercase__: List[Any] = 50 lowercase__: List[Any] = jax.device_count() lowercase__: Optional[int] = num_samples * [prompt] lowercase__: Tuple = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: Any = replicate(_UpperCAmelCase ) lowercase__: int = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = shard(_UpperCAmelCase ) lowercase__: Any = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1 def _snake_case ( self ): lowercase__: Optional[int] = FlaxDDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , ) lowercase__, lowercase__: List[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , ) lowercase__: List[Any] = scheduler.create_state() lowercase__: Tuple = scheduler_state lowercase__: Optional[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: str = jax.random.PRNGKey(0 ) lowercase__: Dict = 50 lowercase__: Tuple = jax.device_count() lowercase__: str = num_samples * [prompt] lowercase__: Dict = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: Union[str, Any] = replicate(_UpperCAmelCase ) lowercase__: str = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = shard(_UpperCAmelCase ) lowercase__: List[str] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1 def _snake_case ( self ): lowercase__: Optional[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: str = jax.device_count() lowercase__: List[str] = num_samples * [prompt] lowercase__: Any = jax.random.split(jax.random.PRNGKey(0 ) , _UpperCAmelCase ) lowercase__, lowercase__: Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , ) lowercase__: List[str] = replicate(_UpperCAmelCase ) lowercase__: str = pipeline.prepare_inputs(_UpperCAmelCase ) lowercase__: Any = shard(_UpperCAmelCase ) lowercase__: Union[str, Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) lowercase__: Optional[int] = images[2, 0, 256, 10:17, 1] # With memory efficient attention lowercase__, lowercase__: Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , use_memory_efficient_attention=_UpperCAmelCase , ) lowercase__: Optional[Any] = replicate(_UpperCAmelCase ) lowercase__: int = pipeline.prepare_inputs(_UpperCAmelCase ) lowercase__: Optional[Any] = shard(_UpperCAmelCase ) lowercase__: List[str] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) lowercase__: List[Any] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
2
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "beit" def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Union[str, Any] = vocab_size lowercase__: List[Any] = hidden_size lowercase__: Optional[int] = num_hidden_layers lowercase__: Optional[int] = num_attention_heads lowercase__: int = intermediate_size lowercase__: List[str] = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: List[str] = initializer_range lowercase__: Optional[int] = layer_norm_eps lowercase__: int = image_size lowercase__: Tuple = patch_size lowercase__: int = num_channels lowercase__: Optional[Any] = use_mask_token lowercase__: List[Any] = use_absolute_position_embeddings lowercase__: Optional[int] = use_relative_position_bias lowercase__: Optional[int] = use_shared_relative_position_bias lowercase__: Optional[Any] = layer_scale_init_value lowercase__: Union[str, Any] = drop_path_rate lowercase__: Tuple = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__: Tuple = out_indices lowercase__: Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__: List[str] = use_auxiliary_head lowercase__: Optional[Any] = auxiliary_loss_weight lowercase__: str = auxiliary_channels lowercase__: List[str] = auxiliary_num_convs lowercase__: Tuple = auxiliary_concat_input lowercase__: Dict = semantic_loss_ignore_index class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _snake_case ( self ): return 1e-4
2
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json" ), }, } __A = { "moussaKam/mbarthez": 1_0_2_4, "moussaKam/barthez": 1_0_2_4, "moussaKam/barthez-orangesum-title": 1_0_2_4, } __A = "▁" class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = VOCAB_FILES_NAMES _UpperCAmelCase :List[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :int = ["input_ids", "attention_mask"] _UpperCAmelCase :Optional[int] = BarthezTokenizer def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , **_UpperCAmelCase , ): # Mask token behave like a normal word, i.e. include the space before it lowercase__: Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__: Any = vocab_file lowercase__: int = False if not self.vocab_file else True def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__: Any = [self.cls_token_id] lowercase__: Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: List[str] = [self.sep_token_id] lowercase__: List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__: List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) return (out_vocab_file,)
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: lowercase__: int = '''''' for word_or_phrase in separated: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(__UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
2
1
"""simple docstring""" import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[8, 16, 32, 64] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=1 , ): lowercase__: Dict = parent lowercase__: Optional[int] = batch_size lowercase__: Tuple = image_size lowercase__: List[Any] = num_channels lowercase__: Tuple = embeddings_size lowercase__: List[str] = hidden_sizes lowercase__: Dict = depths lowercase__: int = is_training lowercase__: Union[str, Any] = use_labels lowercase__: Optional[Any] = hidden_act lowercase__: int = num_labels lowercase__: Optional[int] = scope lowercase__: List[Any] = len(_UpperCAmelCase ) lowercase__: List[Any] = out_features lowercase__: str = out_indices lowercase__: Any = num_groups def _snake_case ( self ): lowercase__: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__: List[Any] = None if self.use_labels: lowercase__: List[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowercase__: str = self.get_config() return config, pixel_values, labels def _snake_case ( self ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = BitModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Optional[Any] = model(_UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = self.num_labels lowercase__: Optional[Any] = BitForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: str = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = BitBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: str = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__: List[str] = None lowercase__: Union[str, Any] = BitBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Tuple = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _snake_case ( self ): lowercase__: Any = self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__: str = config_and_inputs lowercase__: Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _UpperCAmelCase :int = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase :str = False _UpperCAmelCase :List[Any] = False _UpperCAmelCase :List[str] = False _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): lowercase__: List[Any] = BitModelTester(self ) lowercase__: Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def _snake_case ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self ): return @unittest.skip(reason='''Bit does not output attentions''' ) def _snake_case ( self ): pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _snake_case ( self ): pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _snake_case ( self ): pass def _snake_case ( self ): lowercase__, lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: Tuple = model_class(_UpperCAmelCase ) lowercase__: int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__: Dict = [*signature.parameters.keys()] lowercase__: Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__, lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: Any = model_class(config=_UpperCAmelCase ) for name, module in model.named_modules(): if isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def _snake_case ( self ): def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): lowercase__: Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) lowercase__: Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__: Any = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__: str = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__: Tuple = layer_type lowercase__: Optional[int] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__: Union[str, Any] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _snake_case ( self ): pass def _snake_case ( self ): lowercase__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def _snake_case ( self ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: Dict = BitModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: lowercase__: List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @cached_property def _snake_case ( self ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _snake_case ( self ): lowercase__: int = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCAmelCase ) lowercase__: Optional[int] = self.default_image_processor lowercase__: Optional[int] = prepare_img() lowercase__: Union[str, Any] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): lowercase__: int = model(**_UpperCAmelCase ) # verify the logits lowercase__: Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) @require_torch class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = (BitBackbone,) if is_torch_available() else () _UpperCAmelCase :Optional[int] = BitConfig _UpperCAmelCase :str = False def _snake_case ( self ): lowercase__: Tuple = BitModelTester(self )
2
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline _UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self ): torch.manual_seed(0 ) lowercase__: Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowercase__: List[Any] = DDIMScheduler() torch.manual_seed(0 ) lowercase__: Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__: Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase ) lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__: int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ): lowercase__: int = torch.manual_seed(_UpperCAmelCase ) lowercase__: List[Any] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. '''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[str] = self.get_dummy_components() lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _snake_case ( self ): super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 ) def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = '''french fries''' lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase ) lowercase__: Optional[Any] = output.images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 ) lowercase__: List[str] = output.images lowercase__: List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: int = self.get_dummy_components() lowercase__: List[str] = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: Any = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[Any] = self.get_dummy_components() lowercase__: Any = PNDMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase ) lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _UpperCAmelCase=0 ): lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase ) lowercase__: int = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Any = '''stabilityai/stable-diffusion-2-base''' lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ 0.36_968_392, 0.27_025_372, 0.32_446_766, 0.28_379_387, 0.36_363_274, 0.30_733_347, 0.27_100_027, 0.27_054_125, 0.25_536_096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase ) lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: List[str] = self.get_inputs() lowercase__: Dict = pipe(**_UpperCAmelCase ).images lowercase__: Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _snake_case ( self ): lowercase__: int = 0 def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None: lowercase__: List[str] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase__: Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Any = latents[0, -3:, -3:, -1] lowercase__: List[Any] = np.array( [ 0.18_681_869, 0.33_907_816, 0.5_361_276, 0.14_432_865, -0.02_856_611, -0.73_941_123, 0.23_397_987, 0.47_322_682, -0.37_823_164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowercase__: Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Optional[Any] = latents[0, -3:, -3:, -1] lowercase__: Any = np.array( [ 0.18_539_645, 0.33_987_248, 0.5_378_559, 0.14_437_142, -0.02_455_261, -0.7_338_317, 0.23_990_755, 0.47_356_272, -0.3_786_505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowercase__: int = False lowercase__: str = '''stabilityai/stable-diffusion-2-base''' lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _snake_case ( self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base''' lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__: Any = self.get_inputs() lowercase__: List[str] = pipe(**_UpperCAmelCase ) lowercase__: Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
2
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @property def _snake_case ( self ): torch.manual_seed(0 ) lowercase__: List[str] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _snake_case ( self ): lowercase__: Dict = self.dummy_uncond_unet lowercase__: Optional[int] = ScoreSdeVeScheduler() lowercase__: str = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) sde_ve.to(_UpperCAmelCase ) sde_ve.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Union[str, Any] = torch.manual_seed(0 ) lowercase__: Optional[Any] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_UpperCAmelCase ).images lowercase__: List[str] = torch.manual_seed(0 ) lowercase__: List[str] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_UpperCAmelCase , return_dict=_UpperCAmelCase )[ 0 ] lowercase__: Optional[int] = image[0, -3:, -3:, -1] lowercase__: Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase__: Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: str = '''google/ncsnpp-church-256''' lowercase__: Dict = UNetaDModel.from_pretrained(_UpperCAmelCase ) lowercase__: Optional[Any] = ScoreSdeVeScheduler.from_pretrained(_UpperCAmelCase ) lowercase__: Any = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) sde_ve.to(_UpperCAmelCase ) sde_ve.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[Any] = torch.manual_seed(0 ) lowercase__: Union[str, Any] = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_UpperCAmelCase ).images lowercase__: Any = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowercase__: Optional[int] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
2
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Dict = DebertaVaTokenizer _UpperCAmelCase :Tuple = DebertaVaTokenizerFast _UpperCAmelCase :int = True _UpperCAmelCase :int = True def _snake_case ( self ): super().setUp() # We have a SentencePiece fixture for testing lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[str] = '''this is a test''' lowercase__: int = '''this is a test''' return input_text, output_text def _snake_case ( self ): lowercase__: Optional[int] = '''<pad>''' lowercase__: Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(_UpperCAmelCase ) , 30001 ) def _snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _snake_case ( self ): # fmt: off lowercase__: int = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass def _snake_case ( self ): # fmt: off lowercase__: Dict = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Any = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.get_tokenizer() lowercase__: List[Any] = self.get_rust_tokenizer() lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.get_rust_tokenizer() lowercase__: str = tokenizer.encode(_UpperCAmelCase ) lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = '''This is a test''' lowercase__: str = [13, 1, 4398, 25, 21, 1289] lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # fmt: off lowercase__: str = '''I was born in 92000, and this is falsé.''' lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase ) lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' ) lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' ) lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , ) @slow def _snake_case ( self ): # fmt: off lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> float: lowercase__: int = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('''All input parameters must be positive''' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('''Relative densities cannot be greater than one''' ) else: lowercase__: Tuple = 1 - (matter_density + radiation_density + dark_energy) lowercase__: List[str] = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) lowercase__: List[Any] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation __A = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
2
"""simple docstring""" import unittest from transformers import DonutProcessor __A = "naver-clova-ix/donut-base" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__: Union[str, Any] = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__: str = self.processor.tokenajson(_UpperCAmelCase ) self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
2
1
"""simple docstring""" import math class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase=0 ): # a graph with Node 0,1,...,N-1 lowercase__: List[str] = n lowercase__: int = [ [math.inf for j in range(0 , _UpperCAmelCase )] for i in range(0 , _UpperCAmelCase ) ] # adjacency matrix for weight lowercase__: List[str] = [ [math.inf for j in range(0 , _UpperCAmelCase )] for i in range(0 , _UpperCAmelCase ) ] # dp[i][j] stores minimum distance from i to j def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = w def _snake_case ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase__: Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): return self.dp[u][v] if __name__ == "__main__": __A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
2
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __A = logging.get_logger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
2
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __A = logging.get_logger(__name__) __A = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = "bloom" _UpperCAmelCase :List[str] = ["past_key_values"] _UpperCAmelCase :Optional[Any] = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: Any = vocab_size # Backward compatibility with n_embed kwarg lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase ) lowercase__: int = hidden_size if n_embed is None else n_embed lowercase__: int = n_layer lowercase__: int = n_head lowercase__: Optional[Any] = layer_norm_epsilon lowercase__: int = initializer_range lowercase__: List[Any] = use_cache lowercase__: str = pretraining_tp lowercase__: Tuple = apply_residual_connection_post_layernorm lowercase__: int = hidden_dropout lowercase__: Optional[Any] = attention_dropout lowercase__: int = bos_token_id lowercase__: Union[str, Any] = eos_token_id lowercase__: Any = slow_but_exact super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = version.parse("1.12" ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase ) lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: str = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head @property def _snake_case ( self ): return 1e-3 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Tuple = seqlen + 2 lowercase__: str = self._config.hidden_size // self.num_attention_heads lowercase__: Optional[int] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase__: Union[str, Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase__: str = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Tuple = common_inputs['''attention_mask'''] if self.use_past: lowercase__: int = ordered_inputs['''attention_mask'''].dtype lowercase__: List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = 2_5_6 class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = ["melgan"] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): super().__init__() # From MELGAN lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training. lowercase__: Union[str, Any] = 4.0 # Largest value for most examples lowercase__: Union[str, Any] = 128 self.register_modules( notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: int = output_range if clip: lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: str = input_range lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs # Scale to [0, 1]. lowercase__: Tuple = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = input_tokens > 0 lowercase__, lowercase__: str = self.notes_encoder( encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.continuous_encoder( encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = noise_time if not torch.is_tensor(_UpperCAmelCase ): lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0: lowercase__: str = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__: Union[str, Any] = self.decoder( encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase ) return logits @torch.no_grad() def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(_UpperCAmelCase )}.""" ) lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) for i, encoder_input_tokens in enumerate(_UpperCAmelCase ): if i == 0: lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. lowercase__: Union[str, Any] = ones lowercase__: str = self.scale_features( _UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase ) lowercase__: Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__: int = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__: List[Any] = self.decode( encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] ) lowercase__: Dict = mel[:1] lowercase__: List[Any] = mel.cpu().float().numpy() lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCAmelCase , _UpperCAmelCase ) logger.info('''Generated segment''' , _UpperCAmelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__: Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_UpperCAmelCase )
2
1
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ) -> str: if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowercase__: Dict = [] for i in range(__UpperCAmelCase ): lowercase__: Any = i / num_diffusion_timesteps lowercase__: str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[Any] = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase :Any = 2 @register_to_config def __init__( self , _UpperCAmelCase = 1000 , _UpperCAmelCase = 0.00_085 , _UpperCAmelCase = 0.012 , _UpperCAmelCase = "linear" , _UpperCAmelCase = None , _UpperCAmelCase = "epsilon" , _UpperCAmelCase = "linspace" , _UpperCAmelCase = 0 , ): if trained_betas is not None: lowercase__: int = torch.tensor(_UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": lowercase__: str = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowercase__: List[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowercase__: Union[str, Any] = betas_for_alpha_bar(_UpperCAmelCase ) else: raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" ) lowercase__: List[Any] = 1.0 - self.betas lowercase__: Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=None ): if schedule_timesteps is None: lowercase__: Union[str, Any] = self.timesteps lowercase__: Tuple = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: lowercase__: Tuple = 1 if len(_UpperCAmelCase ) > 1 else 0 else: lowercase__: Dict = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep lowercase__: str = self._index_counter[timestep_int] return indices[pos].item() @property def _snake_case ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , ): lowercase__: Optional[int] = self.index_for_timestep(_UpperCAmelCase ) if self.state_in_first_order: lowercase__: Tuple = self.sigmas[step_index] else: lowercase__: List[Any] = self.sigmas_interpol[step_index] lowercase__: Dict = sample / ((sigma**2 + 1) ** 0.5) return sample def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , ): lowercase__: Tuple = num_inference_steps lowercase__: Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowercase__: int = np.linspace(0 , num_train_timesteps - 1 , _UpperCAmelCase , dtype=_UpperCAmelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": lowercase__: List[Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase__: List[Any] = (np.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCAmelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowercase__: int = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase__: str = (np.arange(_UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(_UpperCAmelCase ) timesteps -= 1 else: raise ValueError( F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) lowercase__: int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) lowercase__: Optional[Any] = torch.from_numpy(np.log(_UpperCAmelCase ) ).to(_UpperCAmelCase ) lowercase__: Dict = np.interp(_UpperCAmelCase , np.arange(0 , len(_UpperCAmelCase ) ) , _UpperCAmelCase ) lowercase__: Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) lowercase__: str = torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase ) # interpolate sigmas lowercase__: Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() lowercase__: Optional[int] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) lowercase__: Optional[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(_UpperCAmelCase ).startswith('''mps''' ): # mps does not support float64 lowercase__: List[Any] = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase , dtype=torch.floataa ) else: lowercase__: Any = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase ) # interpolate timesteps lowercase__: List[str] = self.sigma_to_t(_UpperCAmelCase ).to(_UpperCAmelCase , dtype=timesteps.dtype ) lowercase__: List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() lowercase__: List[str] = torch.cat([timesteps[:1], interleaved_timesteps] ) lowercase__: int = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowercase__: Optional[Any] = defaultdict(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # get log sigma lowercase__: Optional[Any] = sigma.log() # get distribution lowercase__: List[Any] = log_sigma - self.log_sigmas[:, None] # get sigmas range lowercase__: Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) lowercase__: Tuple = low_idx + 1 lowercase__: Optional[int] = self.log_sigmas[low_idx] lowercase__: List[str] = self.log_sigmas[high_idx] # interpolate sigmas lowercase__: List[str] = (low - log_sigma) / (low - high) lowercase__: Tuple = w.clamp(0 , 1 ) # transform interpolation to time range lowercase__: Any = (1 - w) * low_idx + w * high_idx lowercase__: Tuple = t.view(sigma.shape ) return t @property def _snake_case ( self ): return self.sample is None def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): lowercase__: Optional[Any] = self.index_for_timestep(_UpperCAmelCase ) # advance index counter by 1 lowercase__: List[str] = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowercase__: List[str] = self.sigmas[step_index] lowercase__: str = self.sigmas_interpol[step_index + 1] lowercase__: Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method lowercase__: Union[str, Any] = self.sigmas[step_index - 1] lowercase__: Optional[Any] = self.sigmas_interpol[step_index] lowercase__: List[str] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowercase__: Optional[Any] = 0 lowercase__: List[str] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowercase__: Any = sigma_hat if self.state_in_first_order else sigma_interpol lowercase__: Union[str, Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowercase__: Dict = sigma_hat if self.state_in_first_order else sigma_interpol lowercase__: Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowercase__: str = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowercase__: Optional[int] = sigma_interpol - sigma_hat # store for 2nd order step lowercase__: List[Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order lowercase__: str = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep lowercase__: Union[str, Any] = sigma_next - sigma_hat lowercase__: Optional[Any] = self.sample lowercase__: Optional[Any] = None lowercase__: Tuple = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples lowercase__: Dict = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCAmelCase ): # mps does not support float64 lowercase__: Any = self.timesteps.to(original_samples.device , dtype=torch.floataa ) lowercase__: Optional[int] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: lowercase__: int = self.timesteps.to(original_samples.device ) lowercase__: Any = timesteps.to(original_samples.device ) lowercase__: List[str] = [self.index_for_timestep(_UpperCAmelCase , _UpperCAmelCase ) for t in timesteps] lowercase__: int = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): lowercase__: str = sigma.unsqueeze(-1 ) lowercase__: List[Any] = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
2
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __A = logging.get_logger(__name__) __A = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = "bloom" _UpperCAmelCase :List[str] = ["past_key_values"] _UpperCAmelCase :Optional[Any] = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: Any = vocab_size # Backward compatibility with n_embed kwarg lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase ) lowercase__: int = hidden_size if n_embed is None else n_embed lowercase__: int = n_layer lowercase__: int = n_head lowercase__: Optional[Any] = layer_norm_epsilon lowercase__: int = initializer_range lowercase__: List[Any] = use_cache lowercase__: str = pretraining_tp lowercase__: Tuple = apply_residual_connection_post_layernorm lowercase__: int = hidden_dropout lowercase__: Optional[Any] = attention_dropout lowercase__: int = bos_token_id lowercase__: Union[str, Any] = eos_token_id lowercase__: Any = slow_but_exact super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = version.parse("1.12" ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase ) lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: str = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head @property def _snake_case ( self ): return 1e-3 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Tuple = seqlen + 2 lowercase__: str = self._config.hidden_size // self.num_attention_heads lowercase__: Optional[int] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase__: Union[str, Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase__: str = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Tuple = common_inputs['''attention_mask'''] if self.use_past: lowercase__: int = ordered_inputs['''attention_mask'''].dtype lowercase__: List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = 2_5_6 class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = ["melgan"] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): super().__init__() # From MELGAN lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training. lowercase__: Union[str, Any] = 4.0 # Largest value for most examples lowercase__: Union[str, Any] = 128 self.register_modules( notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: int = output_range if clip: lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: str = input_range lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs # Scale to [0, 1]. lowercase__: Tuple = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = input_tokens > 0 lowercase__, lowercase__: str = self.notes_encoder( encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.continuous_encoder( encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = noise_time if not torch.is_tensor(_UpperCAmelCase ): lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0: lowercase__: str = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__: Union[str, Any] = self.decoder( encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase ) return logits @torch.no_grad() def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(_UpperCAmelCase )}.""" ) lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) for i, encoder_input_tokens in enumerate(_UpperCAmelCase ): if i == 0: lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. lowercase__: Union[str, Any] = ones lowercase__: str = self.scale_features( _UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase ) lowercase__: Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__: int = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__: List[Any] = self.decode( encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] ) lowercase__: Dict = mel[:1] lowercase__: List[Any] = mel.cpu().float().numpy() lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCAmelCase , _UpperCAmelCase ) logger.info('''Generated segment''' , _UpperCAmelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__: Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_UpperCAmelCase )
2
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): lowercase__: Dict = parent lowercase__: Optional[int] = batch_size lowercase__: List[str] = seq_length lowercase__: Optional[int] = is_training lowercase__: Dict = use_input_mask lowercase__: List[Any] = use_token_type_ids lowercase__: List[str] = use_labels lowercase__: Union[str, Any] = vocab_size lowercase__: str = hidden_size lowercase__: Any = embedding_size lowercase__: Any = num_hidden_layers lowercase__: Any = num_attention_heads lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: Optional[int] = max_position_embeddings lowercase__: List[Any] = type_vocab_size lowercase__: Tuple = type_sequence_label_size lowercase__: Optional[int] = initializer_range lowercase__: Dict = num_labels lowercase__: int = num_choices lowercase__: int = scope def _snake_case ( self ): lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: List[Any] = None if self.use_input_mask: lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[Any] = None if self.use_token_type_ids: lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: Optional[Any] = None lowercase__: Any = None lowercase__: str = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase__: Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ): return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = MobileBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: str = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: str = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = self.num_choices lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): lowercase__: Optional[int] = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ): Union[str, Any] = config_and_inputs lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Optional[Any] = True def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ): lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): lowercase__: Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) lowercase__: Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def _snake_case ( self ): lowercase__: int = MobileBertModelTester(self ) lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: return torch.tensor( __UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase , ) __A = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase ) lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): lowercase__: Tuple = model(_UpperCAmelCase )[0] lowercase__: Dict = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor( [ [ [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5], [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0], [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1], ] ] , device=_UpperCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: # noqa: E741 lowercase__: Dict = len(__UpperCAmelCase ) lowercase__: int = 0 lowercase__: str = [0] * n lowercase__: Tuple = [False] * n lowercase__: List[Any] = [False] * n def dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if parent == root: out_edge_count += 1 lowercase__: Any = True lowercase__: Optional[Any] = at for to in l[at]: if to == parent: pass elif not visited[to]: lowercase__: Optional[Any] = dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase__: Optional[int] = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: lowercase__: List[str] = True # AP found via cycle if at == low[to]: lowercase__: List[Any] = True else: lowercase__: Optional[int] = min(low[at] , __UpperCAmelCase ) return out_edge_count for i in range(__UpperCAmelCase ): if not visited[i]: lowercase__: Any = 0 lowercase__: str = dfs(__UpperCAmelCase , __UpperCAmelCase , -1 , __UpperCAmelCase ) lowercase__: str = out_edge_count > 1 for x in range(len(__UpperCAmelCase ) ): if is_art[x] is True: print(__UpperCAmelCase ) # Adjacency list of graph __A = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
2
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = "unispeech-sat" def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowercase__: Union[str, Any] = hidden_size lowercase__: Union[str, Any] = feat_extract_norm lowercase__: Any = feat_extract_activation lowercase__: List[Any] = list(_UpperCAmelCase ) lowercase__: Optional[int] = list(_UpperCAmelCase ) lowercase__: int = list(_UpperCAmelCase ) lowercase__: Any = conv_bias lowercase__: List[str] = num_conv_pos_embeddings lowercase__: List[str] = num_conv_pos_embedding_groups lowercase__: int = len(self.conv_dim ) lowercase__: Dict = num_hidden_layers lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: Optional[Any] = num_attention_heads lowercase__: Union[str, Any] = hidden_dropout lowercase__: List[Any] = attention_dropout lowercase__: str = activation_dropout lowercase__: Optional[Any] = feat_proj_dropout lowercase__: Optional[int] = final_dropout lowercase__: Any = layerdrop lowercase__: int = layer_norm_eps lowercase__: Any = initializer_range lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[Any] = num_clusters lowercase__: Dict = do_stable_layer_norm lowercase__: List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__: Dict = apply_spec_augment lowercase__: Union[str, Any] = mask_time_prob lowercase__: List[str] = mask_time_length lowercase__: Union[str, Any] = mask_time_min_masks lowercase__: str = mask_feature_prob lowercase__: Dict = mask_feature_length lowercase__: List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__: Tuple = num_codevectors_per_group lowercase__: Optional[Any] = num_codevector_groups lowercase__: int = contrastive_logits_temperature lowercase__: Any = feat_quantizer_dropout lowercase__: int = num_negatives lowercase__: Optional[Any] = codevector_dim lowercase__: int = proj_codevector_dim lowercase__: str = diversity_loss_weight # ctc loss lowercase__: int = ctc_loss_reduction lowercase__: Union[str, Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__: Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = list(_UpperCAmelCase ) lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
2
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { "configuration_rag": ["RagConfig"], "retrieval_rag": ["RagRetriever"], "tokenization_rag": ["RagTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) __A = parser.parse_args() __A = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
1
"""simple docstring""" class UpperCAmelCase : # Public class to implement a graph """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[int] = row lowercase__: Optional[Any] = col lowercase__: List[Any] = graph def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # Checking all 8 elements surrounding nth element lowercase__: List[Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__: Optional[Any] = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__: Union[str, Any] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _UpperCAmelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _UpperCAmelCase ) def _snake_case ( self ): # And finally, count all islands. lowercase__: Optional[Any] = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__: Dict = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) count += 1 return count
2
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
2
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: Tuple = '''ZinengTang/tvlt-base''' lowercase__: Any = tempfile.mkdtemp() def _snake_case ( self , **_UpperCAmelCase ): return TvltImageProcessor.from_pretrained(self.checkpoint , **_UpperCAmelCase ) def _snake_case ( self , **_UpperCAmelCase ): return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_UpperCAmelCase ) def _snake_case ( self ): shutil.rmtree(self.tmpdirname ) def _snake_case ( self ): lowercase__: str = self.get_image_processor() lowercase__: Dict = self.get_feature_extractor() lowercase__: Optional[int] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowercase__: str = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Any = self.get_image_processor() lowercase__: Optional[int] = self.get_feature_extractor() lowercase__: Dict = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) lowercase__: Tuple = np.ones([12000] ) lowercase__: List[str] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ) lowercase__: Optional[Any] = processor(audio=_UpperCAmelCase , return_tensors='''np''' ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self ): lowercase__: int = self.get_image_processor() lowercase__: Optional[Any] = self.get_feature_extractor() lowercase__: Union[str, Any] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) lowercase__: Any = np.ones([3, 224, 224] ) lowercase__: Optional[Any] = image_processor(_UpperCAmelCase , return_tensors='''np''' ) lowercase__: Any = processor(images=_UpperCAmelCase , return_tensors='''np''' ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self ): lowercase__: Dict = self.get_image_processor() lowercase__: Optional[int] = self.get_feature_extractor() lowercase__: Tuple = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) lowercase__: Dict = np.ones([12000] ) lowercase__: Dict = np.ones([3, 224, 224] ) lowercase__: Optional[Any] = processor(audio=_UpperCAmelCase , images=_UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] ) # test if it raises when no input is passed with pytest.raises(_UpperCAmelCase ): processor() def _snake_case ( self ): lowercase__: Union[str, Any] = self.get_image_processor() lowercase__: str = self.get_feature_extractor() lowercase__: Any = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
2
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A = logging.get_logger(__name__) __A = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = "codegen" _UpperCAmelCase :Optional[int] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: int = vocab_size lowercase__: str = n_ctx lowercase__: List[Any] = n_positions lowercase__: Union[str, Any] = n_embd lowercase__: Optional[Any] = n_layer lowercase__: str = n_head lowercase__: List[Any] = n_inner lowercase__: Union[str, Any] = rotary_dim lowercase__: Optional[Any] = activation_function lowercase__: Union[str, Any] = resid_pdrop lowercase__: Optional[int] = embd_pdrop lowercase__: Optional[Any] = attn_pdrop lowercase__: Optional[int] = layer_norm_epsilon lowercase__: List[Any] = initializer_range lowercase__: Tuple = use_cache lowercase__: Any = bos_token_id lowercase__: Any = eos_token_id super().__init__( bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' ) lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Any = seqlen + 2 lowercase__: List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__: Optional[Any] = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Optional[Any] = common_inputs['''attention_mask'''] if self.use_past: lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype lowercase__: List[Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMSNModel", "ViTMSNForImageClassification", "ViTMSNPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = field( metadata={"help": "The output directory where the model will be written."} ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } ,) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: lowercase__: Dict = HfArgumentParser((ModelArguments,) ) ((lowercase__), ): List[str] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowercase__: Tuple = True lowercase__: int = True lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowercase__: int = decoder_config.decoder_start_token_id lowercase__: Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: lowercase__: Tuple = decoder_config.bos_token_id if pad_token_id is None: lowercase__: Optional[int] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowercase__: Optional[Any] = decoder_config.eos_token_id lowercase__: Tuple = decoder_start_token_id lowercase__: Dict = pad_token_id lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
2
1
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: if attention_mask is None: lowercase__: List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowercase__: Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowercase__: Tuple = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__: List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__: int = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.02 , ): lowercase__: str = parent lowercase__: List[str] = batch_size lowercase__: Optional[Any] = seq_length lowercase__: str = is_training lowercase__: Dict = use_labels lowercase__: List[str] = vocab_size lowercase__: Dict = hidden_size lowercase__: Optional[Any] = num_hidden_layers lowercase__: str = num_attention_heads lowercase__: List[str] = intermediate_size lowercase__: Optional[Any] = hidden_act lowercase__: int = hidden_dropout_prob lowercase__: Union[str, Any] = attention_probs_dropout_prob lowercase__: Tuple = max_position_embeddings lowercase__: int = eos_token_id lowercase__: List[str] = pad_token_id lowercase__: Any = bos_token_id lowercase__: Tuple = initializer_range def _snake_case ( self ): lowercase__: Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowercase__: Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) lowercase__: Tuple = shift_tokens_right(_UpperCAmelCase , 1 , 2 ) lowercase__: str = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , ) lowercase__: str = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _snake_case ( self ): lowercase__, lowercase__: Optional[Any] = self.prepare_config_and_inputs() return config, inputs_dict def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Union[str, Any] = 20 lowercase__: Optional[int] = model_class_name(_UpperCAmelCase ) lowercase__: Any = model.encode(inputs_dict['''input_ids'''] ) lowercase__, lowercase__: int = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowercase__: int = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowercase__: Dict = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__: str = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) lowercase__: List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowercase__: Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , ) lowercase__: List[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = 20 lowercase__: Any = model_class_name(_UpperCAmelCase ) lowercase__: Dict = model.encode(inputs_dict['''input_ids'''] ) lowercase__, lowercase__: List[str] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowercase__: Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowercase__: List[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__: Dict = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) lowercase__: Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowercase__: List[Any] = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) lowercase__: Optional[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase ) lowercase__: Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" ) @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = 99 def _snake_case ( self ): lowercase__: Optional[int] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowercase__: Optional[Any] = input_ids.shape[0] lowercase__: str = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _snake_case ( self ): lowercase__, lowercase__, lowercase__: List[Any] = self._get_config_and_data() lowercase__: List[Any] = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase ) lowercase__: int = lm_model(input_ids=_UpperCAmelCase ) lowercase__: Any = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowercase__: int = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase ) lowercase__: List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) lowercase__: Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) lowercase__: Dict = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ) lowercase__: Union[str, Any] = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) lowercase__: List[Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 ) lowercase__: List[Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum() lowercase__: Any = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_UpperCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ,_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[Any] = True _UpperCAmelCase :List[Any] = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) _UpperCAmelCase :Optional[int] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def _snake_case ( self ): lowercase__: Dict = FlaxBlenderbotModelTester(self ) def _snake_case ( self ): lowercase__, lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__, lowercase__: List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__, lowercase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__: List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = model_class(_UpperCAmelCase ) @jax.jit def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ): return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) with self.subTest('''JIT Enabled''' ): lowercase__: Optional[int] = encode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase__: Any = encode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( self ): lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__: Union[str, Any] = model_class(_UpperCAmelCase ) lowercase__: Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowercase__: str = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return model.decode( decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , ) with self.subTest('''JIT Enabled''' ): lowercase__: List[Any] = decode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase__: Dict = decode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _snake_case ( self ): for model_class_name in self.all_model_classes: lowercase__: str = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowercase__: Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id lowercase__: Optional[int] = model(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def _snake_case ( self ): lowercase__: Optional[Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} lowercase__: Optional[Any] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} lowercase__: Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_UpperCAmelCase ) lowercase__: Tuple = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) lowercase__: Optional[int] = ['''Sam'''] lowercase__: str = tokenizer(_UpperCAmelCase , return_tensors='''jax''' ) lowercase__: List[Any] = model.generate(**_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: List[str] = '''Sam is a great name. It means "sun" in Gaelic.''' lowercase__: Optional[Any] = tokenizer.batch_decode(_UpperCAmelCase , **_UpperCAmelCase ) assert generated_txt[0].strip() == tgt_text
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "ctrl" _UpperCAmelCase :int = ["past_key_values"] _UpperCAmelCase :Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ): lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[int] = n_positions lowercase__: Optional[int] = n_embd lowercase__: Any = n_layer lowercase__: Any = n_head lowercase__: int = dff lowercase__: Dict = resid_pdrop lowercase__: Any = embd_pdrop lowercase__: Any = layer_norm_epsilon lowercase__: Optional[int] = initializer_range lowercase__: Dict = use_cache super().__init__(**_UpperCAmelCase )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: # Return True if there is node that has not iterated. lowercase__: str = [False] * len(__UpperCAmelCase ) lowercase__: Any = [] queue.append(__UpperCAmelCase ) lowercase__: str = True while queue: lowercase__: int = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__UpperCAmelCase ) lowercase__: int = True lowercase__: Tuple = u return visited[t] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: # This array is filled by BFS and to store path lowercase__: str = [-1] * (len(__UpperCAmelCase )) lowercase__: List[str] = 0 while bfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowercase__: int = float('''Inf''' ) lowercase__: Tuple = sink while s != source: # Find the minimum value in select path lowercase__: Optional[int] = min(__UpperCAmelCase , graph[parent[s]][s] ) lowercase__: Dict = parent[s] max_flow += path_flow lowercase__: List[Any] = sink while v != source: lowercase__: Any = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase__: int = parent[v] return max_flow __A = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __A ,__A = 0, 5 print(ford_fulkerson(graph, source, sink))
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int: lowercase__: str = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
2
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = "philschmid/bart-large-cnn-samsum" _UpperCAmelCase :Dict = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) _UpperCAmelCase :Union[str, Any] = "summarizer" _UpperCAmelCase :Union[str, Any] = AutoTokenizer _UpperCAmelCase :Optional[int] = AutoModelForSeqaSeqLM _UpperCAmelCase :Union[str, Any] = ["text"] _UpperCAmelCase :Tuple = ["text"] def _snake_case ( self , _UpperCAmelCase ): return self.pre_processor(_UpperCAmelCase , return_tensors='''pt''' , truncation=_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): return self.model.generate(**_UpperCAmelCase )[0] def _snake_case ( self , _UpperCAmelCase ): return self.pre_processor.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
2
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ): lowercase__: int = bp_numa lowercase__: Union[str, Any] = bp_numa lowercase__: List[str] = bp_numa lowercase__: str = conva_get[:2] lowercase__: Union[str, Any] = conva_get[2] lowercase__: Any = size_pa lowercase__: Optional[Any] = rate_w lowercase__: Tuple = rate_t lowercase__: List[str] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , _UpperCAmelCase ): # save model dict with pickle lowercase__: int = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_UpperCAmelCase , '''wb''' ) as f: pickle.dump(_UpperCAmelCase , _UpperCAmelCase ) print(F"""Model saved: {save_path}""" ) @classmethod def _snake_case ( cls , _UpperCAmelCase ): # read saved model with open(_UpperCAmelCase , '''rb''' ) as f: lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301 lowercase__: Tuple = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) lowercase__: Any = model_dic.get('''size_pooling1''' ) lowercase__: int = model_dic.get('''num_bp1''' ) lowercase__: Optional[int] = model_dic.get('''num_bp2''' ) lowercase__: str = model_dic.get('''num_bp3''' ) lowercase__: Any = model_dic.get('''rate_weight''' ) lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' ) # create model instance lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # modify model parameter lowercase__: Dict = model_dic.get('''w_conv1''' ) lowercase__: Dict = model_dic.get('''wkj''' ) lowercase__: str = model_dic.get('''vji''' ) lowercase__: List[Any] = model_dic.get('''thre_conv1''' ) lowercase__: Optional[int] = model_dic.get('''thre_bp2''' ) lowercase__: Tuple = model_dic.get('''thre_bp3''' ) return conv_ins def _snake_case ( self , _UpperCAmelCase ): return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , _UpperCAmelCase ): return round(_UpperCAmelCase , 3 ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # convolution process lowercase__: Any = convs[0] lowercase__: Tuple = convs[1] lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0] # get the data slice of original image data, data_focus lowercase__: List[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): lowercase__: Tuple = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_UpperCAmelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__: Optional[int] = [] lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_UpperCAmelCase ): lowercase__: str = [] for i_focus in range(len(_UpperCAmelCase ) ): lowercase__: Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape( _UpperCAmelCase , _UpperCAmelCase ) data_featuremap.append(_UpperCAmelCase ) # expanding the data slice to One dimenssion lowercase__: Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) ) lowercase__: Any = np.asarray(_UpperCAmelCase ) return focus_list, data_featuremap def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ): # pooling process lowercase__: List[Any] = len(featuremaps[0] ) lowercase__: Any = int(size_map / size_pooling ) lowercase__: List[Any] = [] for i_map in range(len(_UpperCAmelCase ) ): lowercase__: Any = featuremaps[i_map] lowercase__: Tuple = [] for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_UpperCAmelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ) featuremap_pooled.append(_UpperCAmelCase ) return featuremap_pooled def _snake_case ( self , _UpperCAmelCase ): # expanding three dimension data to one dimension list lowercase__: Optional[Any] = [] for i in range(len(_UpperCAmelCase ) ): lowercase__: Any = np.shape(data[i] ) lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) lowercase__: List[str] = data_listed.getA().tolist()[0] data_expanded.extend(_UpperCAmelCase ) lowercase__: List[str] = np.asarray(_UpperCAmelCase ) return data_expanded def _snake_case ( self , _UpperCAmelCase ): # expanding matrix to one dimension list lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase ) lowercase__: List[str] = np.shape(_UpperCAmelCase ) lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = [] lowercase__: List[str] = 0 for i_map in range(_UpperCAmelCase ): lowercase__: Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = pd_pool[ i_pool ] lowercase__: List[Any] = i_pool + 1 lowercase__: str = np.multiply( _UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_UpperCAmelCase ) return pd_all def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) ) print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) ) lowercase__: Tuple = 0 lowercase__: Tuple = [] lowercase__: Optional[int] = 10000 while rp < n_repeat and mse >= error_accuracy: lowercase__: Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(_UpperCAmelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__: List[Any] = np.asmatrix(datas_train[p] ) lowercase__: Optional[int] = np.asarray(datas_teach[p] ) lowercase__, lowercase__: List[str] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: int = np.shape(_UpperCAmelCase ) lowercase__: Optional[Any] = self._expand(_UpperCAmelCase ) lowercase__: Any = data_bp_input lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa lowercase__: str = self.sig(_UpperCAmelCase ) lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa lowercase__: Dict = self.sig(_UpperCAmelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__: str = np.multiply( (data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: str = np.multiply( np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji ) lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__: List[str] = pd_conva_pooled.T.getA().tolist() lowercase__: Optional[Any] = self._calculate_gradient_from_pool( _UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__: str = self._expand_mat(pd_conva_all[k_conv] ) lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__: List[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__: str = rp + 1 lowercase__: Optional[Any] = error_count / patterns all_mse.append(_UpperCAmelCase ) def draw_error(): lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_UpperCAmelCase , '''+-''' ) plt.plot(_UpperCAmelCase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_UpperCAmelCase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def _snake_case ( self , _UpperCAmelCase ): # model predict lowercase__: Union[str, Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) ) for p in range(len(_UpperCAmelCase ) ): lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] ) lowercase__, lowercase__: Any = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: str = self._expand(_UpperCAmelCase ) lowercase__: List[Any] = data_bp_input lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out] return np.asarray(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # return the data of image after convoluting process so we can check it out lowercase__: int = np.asmatrix(_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
2
1
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = CTRLTokenizer _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Optional[int] = '''adapt react readapt apt''' return input_text, output_text def _snake_case ( self ): lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = tokens + [tokenizer.unk_token] lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
2
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = CTRLTokenizer _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Optional[int] = '''adapt react readapt apt''' return input_text, output_text def _snake_case ( self ): lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = tokens + [tokenizer.unk_token] lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
2
1
"""simple docstring""" __A = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __A = [{"type": "code", "content": INSTALL_CONTENT}] __A = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
2
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger __A = "<<<<<<< This should probably be modified because it mentions: " __A = "=======\n>>>>>>>\n" __A = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] __A = [ # (pattern, replacement) # Order is important here for some replacements (R"tfds\.core", R"datasets"), (R"tf\.io\.gfile\.GFile", R"open"), (R"tf\.([\w\d]+)", R"datasets.Value('\1')"), (R"tfds\.features\.Text\(\)", R"datasets.Value('string')"), (R"tfds\.features\.Text\(", R"datasets.Value('string'),"), (R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("), (R"tfds\.features\.FeaturesDict\(", R"dict("), (R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (R"tfds\.", R"datasets."), (R"dl_manager\.manual_dir", R"self.config.data_dir"), (R"self\.builder_config", R"self.config"), ] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: return ConvertCommand(args.tfds_path , args.datasets_directory ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" @staticmethod def _snake_case ( _UpperCAmelCase ): lowercase__: int = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ): lowercase__: List[str] = get_logger('''datasets-cli/converting''' ) lowercase__: Optional[Any] = tfds_path lowercase__: Dict = datasets_directory def _snake_case ( self ): if os.path.isdir(self._tfds_path ): lowercase__: Optional[Any] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__: Optional[int] = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' ) lowercase__: int = os.path.abspath(self._datasets_directory ) self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) lowercase__: Tuple = [] lowercase__: Dict = [] lowercase__: Any = {} if os.path.isdir(self._tfds_path ): lowercase__: Dict = os.listdir(_UpperCAmelCase ) else: lowercase__: Dict = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"""Looking at file {f_name}""" ) lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: lowercase__: Tuple = f.readlines() lowercase__: Optional[Any] = [] lowercase__: Dict = False lowercase__: List[str] = False lowercase__: List[Any] = [] for line in lines: lowercase__: List[str] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__: Optional[int] = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here lowercase__: Dict = '''''' continue elif "from absl import logging" in out_line: lowercase__: Tuple = '''from datasets import logging\n''' elif "getLogger" in out_line: lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__: Any = True lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' ) out_lines.append(_UpperCAmelCase ) out_lines.append(_UpperCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) lowercase__: List[str] = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__: Optional[Any] = True out_lines.append(_UpperCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__: Dict = f_name.replace('''.py''' , '''''' ) lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) self._logger.info(F"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_UpperCAmelCase ) if needs_manual_update: with_manual_update.append(_UpperCAmelCase ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.writelines(_UpperCAmelCase ) self._logger.info(F"""Converted in {output_file}""" ) for utils_file in utils_files: try: lowercase__: str = os.path.basename(_UpperCAmelCase ) lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(_UpperCAmelCase , _UpperCAmelCase ) except KeyError: self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
2
1
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowercase__: List[str] = np.full((len(__UpperCAmelCase ), sequence_length, 2) , __UpperCAmelCase ) else: lowercase__: Optional[int] = np.full((len(__UpperCAmelCase ), sequence_length) , __UpperCAmelCase ) for i, tensor in enumerate(__UpperCAmelCase ): if padding_side == "right": if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowercase__: Any = tensor[:sequence_length] else: lowercase__: List[str] = tensor[:sequence_length] else: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowercase__: Any = tensor[:sequence_length] else: lowercase__: Optional[int] = tensor[:sequence_length] return out_tensor.tolist() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__: Union[str, Any] = ord(__UpperCAmelCase ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True lowercase__: Union[str, Any] = unicodedata.category(__UpperCAmelCase ) if cat.startswith('''P''' ): return True return False @dataclass class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :PreTrainedTokenizerBase _UpperCAmelCase :Union[bool, str, PaddingStrategy] = True _UpperCAmelCase :Optional[int] = None _UpperCAmelCase :Optional[int] = None _UpperCAmelCase :int = -100 _UpperCAmelCase :str = "pt" def _snake_case ( self , _UpperCAmelCase ): import torch lowercase__: str = '''label''' if '''label''' in features[0].keys() else '''labels''' lowercase__: Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowercase__: Tuple = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch lowercase__: Optional[Any] = torch.tensor(batch['''entity_ids'''] ).shape[1] lowercase__: Optional[Any] = self.tokenizer.padding_side if padding_side == "right": lowercase__: str = [ list(_UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) for label in labels ] else: lowercase__: int = [ [self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) + list(_UpperCAmelCase ) for label in labels ] lowercase__: Tuple = [feature['''ner_tags'''] for feature in features] lowercase__: Dict = padding_tensor(_UpperCAmelCase , -1 , _UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = [feature['''original_entity_spans'''] for feature in features] lowercase__: Optional[Any] = padding_tensor(_UpperCAmelCase , (-1, -1) , _UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = {k: torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "cvt" def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Dict = num_channels lowercase__: str = patch_sizes lowercase__: Optional[Any] = patch_stride lowercase__: List[str] = patch_padding lowercase__: Optional[Any] = embed_dim lowercase__: Optional[int] = num_heads lowercase__: Any = depth lowercase__: str = mlp_ratio lowercase__: Any = attention_drop_rate lowercase__: Any = drop_rate lowercase__: Optional[Any] = drop_path_rate lowercase__: Dict = qkv_bias lowercase__: Dict = cls_token lowercase__: Any = qkv_projection_method lowercase__: List[str] = kernel_qkv lowercase__: Union[str, Any] = padding_kv lowercase__: Optional[int] = stride_kv lowercase__: int = padding_q lowercase__: Dict = stride_q lowercase__: Any = initializer_range lowercase__: Union[str, Any] = layer_norm_eps
2
1
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 8 ) -> str: lowercase__: List[str] = ascii_letters + digits + punctuation return "".join(secrets.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(__UpperCAmelCase ) lowercase__: int = i // 3 lowercase__: List[Any] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) lowercase__: Tuple = ( chars_incl + random(__UpperCAmelCase , quotient + remainder ) + random(__UpperCAmelCase , __UpperCAmelCase ) + random(__UpperCAmelCase , __UpperCAmelCase ) ) lowercase__: Any = list(__UpperCAmelCase ) shuffle(__UpperCAmelCase ) return "".join(__UpperCAmelCase ) # random is a generalised function for letters, characters and numbers def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: return "".join(secrets.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: pass # Put your code here... def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: pass # Put your code here... def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: pass # Put your code here... def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase = 8 ) -> bool: if len(__UpperCAmelCase ) < min_length: # Your Password must be at least 8 characters long return False lowercase__: int = any(char in ascii_uppercase for char in password ) lowercase__: Dict = any(char in ascii_lowercase for char in password ) lowercase__: Any = any(char in digits for char in password ) lowercase__: Dict = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def SCREAMING_SNAKE_CASE__ ( ) -> int: lowercase__: Dict = int(input('''Please indicate the max length of your password: ''' ).strip() ) lowercase__: Optional[Any] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''' , password_generator(__UpperCAmelCase ) ) print( '''Alternative Password generated:''' , alternative_password_generator(__UpperCAmelCase , __UpperCAmelCase ) , ) print('''[If you are thinking of using this passsword, You better save it.]''' ) if __name__ == "__main__": main()
2
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = "rag" _UpperCAmelCase :List[Any] = True def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' ) lowercase__: Any = question_encoder_config.pop('''model_type''' ) lowercase__: Tuple = kwargs.pop('''generator''' ) lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: str = reduce_loss lowercase__: str = label_smoothing lowercase__: Dict = exclude_bos_score lowercase__: Any = do_marginalize lowercase__: Optional[int] = title_sep lowercase__: Any = doc_sep lowercase__: Any = n_docs lowercase__: List[Any] = max_combined_length lowercase__: int = dataset lowercase__: int = dataset_split lowercase__: str = index_name lowercase__: Dict = retrieval_vector_size lowercase__: Dict = retrieval_batch_size lowercase__: List[str] = passages_path lowercase__: str = index_path lowercase__: Optional[Any] = use_dummy_dataset lowercase__: str = output_retrieved lowercase__: List[str] = do_deduplication lowercase__: List[Any] = use_cache if self.forced_eos_token_id is None: lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase ) @classmethod def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = copy.deepcopy(self.__dict__ ) lowercase__: str = self.question_encoder.to_dict() lowercase__: str = self.generator.to_dict() lowercase__: str = self.__class__.model_type return output
2
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> int: lowercase__: Optional[Any] = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') ) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') ) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') ) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase__: Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: lowercase__: Union[str, Any] = '''''' else: lowercase__: str = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__: Optional[int] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) lowercase__: List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase__: int = in_proj_weight[ : config.hidden_size, : ] lowercase__: Union[str, Any] = in_proj_bias[: config.hidden_size] lowercase__: Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__: int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__: int = in_proj_weight[ -config.hidden_size :, : ] lowercase__: Any = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]: lowercase__: Optional[Any] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: lowercase__: Tuple = dct.pop(__UpperCAmelCase ) lowercase__: Union[str, Any] = val def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: lowercase__: List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__: Tuple = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Dict: lowercase__: int = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=__UpperCAmelCase , ) lowercase__: List[Any] = ViTHybridConfig(backbone_config=__UpperCAmelCase , image_size=3_8_4 , num_labels=1_0_0_0 ) lowercase__: Optional[int] = False # load original model from timm lowercase__: Optional[Any] = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowercase__: str = timm_model.state_dict() if base_model: remove_classification_head_(__UpperCAmelCase ) lowercase__: Optional[Any] = create_rename_keys(__UpperCAmelCase , __UpperCAmelCase ) for src, dest in rename_keys: rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase__: Optional[Any] = '''huggingface/label-files''' lowercase__: Optional[Any] = '''imagenet-1k-id2label.json''' lowercase__: Any = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) lowercase__: Optional[int] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} lowercase__: Any = idalabel lowercase__: Tuple = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": lowercase__: List[Any] = ViTHybridModel(__UpperCAmelCase ).eval() else: lowercase__: Optional[int] = ViTHybridForImageClassification(__UpperCAmelCase ).eval() model.load_state_dict(__UpperCAmelCase ) # create image processor lowercase__: List[str] = create_transform(**resolve_data_config({} , model=__UpperCAmelCase ) ) lowercase__: Tuple = transform.transforms lowercase__: Union[str, Any] = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } lowercase__: Tuple = ViTHybridImageProcessor( do_resize=__UpperCAmelCase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__UpperCAmelCase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase__: str = prepare_img() lowercase__: List[Any] = transform(__UpperCAmelCase ).unsqueeze(0 ) lowercase__: Optional[Any] = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) # verify logits with torch.no_grad(): lowercase__: Any = model(__UpperCAmelCase ) lowercase__: Union[str, Any] = outputs.logits print('''Predicted class:''' , logits.argmax(-1 ).item() ) if base_model: lowercase__: Tuple = timm_model.forward_features(__UpperCAmelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__UpperCAmelCase , outputs.pooler_output , atol=1e-3 ) else: lowercase__: Optional[Any] = timm_model(__UpperCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1e-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCAmelCase ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: print(F"""Pushing model and processor to the hub {vit_name}""" ) model.push_to_hub(F"""ybelkada/{vit_name}""" ) processor.push_to_hub(F"""ybelkada/{vit_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_r50_s16_384", type=str, help="Name of the hybrid ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) __A = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
2
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) __A = "hf-internal-testing/tiny-random-bert" __A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") __A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCAmelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: Dict = f.read() self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(os.path.isfile(_UpperCAmelCase ) ) # File is cached at the same place the second time. lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # Using a specific revision to test the full commit hash. lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase ) with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' ) def _snake_case ( self ): with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ): lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' ) with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f: lowercase__: int = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) ) lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) lowercase__: Union[str, Any] = mock.Mock() lowercase__: str = 500 lowercase__: Union[str, Any] = {} lowercase__: List[str] = HTTPError lowercase__: int = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head: lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase ) self.assertIsNone(_UpperCAmelCase ) # This check we did call the fake head request mock_head.assert_called() def _snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) ) def _snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCAmelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' ) lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
2
1
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :str = LayoutLMTokenizer _UpperCAmelCase :int = LayoutLMTokenizerFast _UpperCAmelCase :Union[str, Any] = True _UpperCAmelCase :List[str] = True def _snake_case ( self ): super().setUp() lowercase__: Union[str, Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _snake_case ( self , **_UpperCAmelCase ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Dict = '''UNwant\u00E9d,running''' lowercase__: Optional[Any] = '''unwanted, running''' return input_text, output_text def _snake_case ( self ): lowercase__: Dict = self.tokenizer_class(self.vocab_file ) lowercase__: Optional[int] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] ) def _snake_case ( self ): pass
2
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "beit" def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Union[str, Any] = vocab_size lowercase__: List[Any] = hidden_size lowercase__: Optional[int] = num_hidden_layers lowercase__: Optional[int] = num_attention_heads lowercase__: int = intermediate_size lowercase__: List[str] = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: List[str] = initializer_range lowercase__: Optional[int] = layer_norm_eps lowercase__: int = image_size lowercase__: Tuple = patch_size lowercase__: int = num_channels lowercase__: Optional[Any] = use_mask_token lowercase__: List[Any] = use_absolute_position_embeddings lowercase__: Optional[int] = use_relative_position_bias lowercase__: Optional[int] = use_shared_relative_position_bias lowercase__: Optional[Any] = layer_scale_init_value lowercase__: Union[str, Any] = drop_path_rate lowercase__: Tuple = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__: Tuple = out_indices lowercase__: Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__: List[str] = use_auxiliary_head lowercase__: Optional[Any] = auxiliary_loss_weight lowercase__: str = auxiliary_channels lowercase__: List[str] = auxiliary_num_convs lowercase__: Tuple = auxiliary_concat_input lowercase__: Dict = semantic_loss_ignore_index class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _snake_case ( self ): return 1e-4
2
1
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "mvp" _UpperCAmelCase :List[Any] = ["past_key_values"] _UpperCAmelCase :str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _UpperCAmelCase=50267 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=4096 , _UpperCAmelCase=16 , _UpperCAmelCase=12 , _UpperCAmelCase=4096 , _UpperCAmelCase=16 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1024 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=0.0 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=100 , _UpperCAmelCase=800 , **_UpperCAmelCase , ): lowercase__: List[str] = vocab_size lowercase__: List[str] = max_position_embeddings lowercase__: Union[str, Any] = d_model lowercase__: List[str] = encoder_ffn_dim lowercase__: Dict = encoder_layers lowercase__: Any = encoder_attention_heads lowercase__: Dict = decoder_ffn_dim lowercase__: Union[str, Any] = decoder_layers lowercase__: List[str] = decoder_attention_heads lowercase__: List[str] = dropout lowercase__: List[Any] = attention_dropout lowercase__: List[str] = activation_dropout lowercase__: str = activation_function lowercase__: Union[str, Any] = init_std lowercase__: Dict = encoder_layerdrop lowercase__: List[str] = decoder_layerdrop lowercase__: Any = classifier_dropout lowercase__: Union[str, Any] = use_cache lowercase__: Any = encoder_layers lowercase__: List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__: Any = use_prompt lowercase__: List[Any] = prompt_length lowercase__: int = prompt_mid_dim super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , _UpperCAmelCase ): lowercase__: str = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ '''The config can simply be saved and uploaded again to be fixed.''' )
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: lowercase__: int = '''''' for word_or_phrase in separated: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(__UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
2
1
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[list[int]]: lowercase__: list[list[int]] = [] create_all_state(1 , __UpperCAmelCase , __UpperCAmelCase , [] , __UpperCAmelCase ) return result def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> None: if level == 0: total_list.append(current_list[:] ) return for i in range(__UpperCAmelCase , total_number - level + 2 ): current_list.append(__UpperCAmelCase ) create_all_state(i + 1 , __UpperCAmelCase , level - 1 , __UpperCAmelCase , __UpperCAmelCase ) current_list.pop() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> None: for i in total_list: print(*__UpperCAmelCase ) if __name__ == "__main__": __A = 4 __A = 2 __A = generate_all_combinations(n, k) print_all_state(total_list)
2
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline _UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self ): torch.manual_seed(0 ) lowercase__: Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowercase__: List[Any] = DDIMScheduler() torch.manual_seed(0 ) lowercase__: Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__: Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase ) lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__: int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ): lowercase__: int = torch.manual_seed(_UpperCAmelCase ) lowercase__: List[Any] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. '''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[str] = self.get_dummy_components() lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _snake_case ( self ): super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 ) def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = '''french fries''' lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase ) lowercase__: Optional[Any] = output.images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: Union[str, Any] = self.get_dummy_components() lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 ) lowercase__: List[str] = output.images lowercase__: List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: int = self.get_dummy_components() lowercase__: List[str] = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: Any = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__: List[Any] = self.get_dummy_components() lowercase__: Any = PNDMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase ) lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase ) lowercase__: int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase ) lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _UpperCAmelCase=0 ): lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase ) lowercase__: int = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ): lowercase__: Any = '''stabilityai/stable-diffusion-2-base''' lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ 0.36_968_392, 0.27_025_372, 0.32_446_766, 0.28_379_387, 0.36_363_274, 0.30_733_347, 0.27_100_027, 0.27_054_125, 0.25_536_096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def _snake_case ( self ): lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase ) lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: List[str] = self.get_inputs() lowercase__: Dict = pipe(**_UpperCAmelCase ).images lowercase__: Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__: List[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _snake_case ( self ): lowercase__: int = 0 def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None: lowercase__: List[str] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase__: Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Any = latents[0, -3:, -3:, -1] lowercase__: List[Any] = np.array( [ 0.18_681_869, 0.33_907_816, 0.5_361_276, 0.14_432_865, -0.02_856_611, -0.73_941_123, 0.23_397_987, 0.47_322_682, -0.37_823_164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowercase__: Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__: Optional[Any] = latents[0, -3:, -3:, -1] lowercase__: Any = np.array( [ 0.18_539_645, 0.33_987_248, 0.5_378_559, 0.14_437_142, -0.02_455_261, -0.7_338_317, 0.23_990_755, 0.47_356_272, -0.3_786_505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowercase__: int = False lowercase__: str = '''stabilityai/stable-diffusion-2-base''' lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() lowercase__: Tuple = self.get_inputs() pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _snake_case ( self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base''' lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' ) lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase ) lowercase__: List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__: Any = self.get_inputs() lowercase__: List[str] = pipe(**_UpperCAmelCase ) lowercase__: Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
2
1
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): lowercase__: Dict = parent lowercase__: Optional[int] = batch_size lowercase__: List[str] = seq_length lowercase__: Optional[int] = is_training lowercase__: Dict = use_input_mask lowercase__: List[Any] = use_token_type_ids lowercase__: List[str] = use_labels lowercase__: Union[str, Any] = vocab_size lowercase__: str = hidden_size lowercase__: Any = embedding_size lowercase__: Any = num_hidden_layers lowercase__: Any = num_attention_heads lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: Optional[int] = max_position_embeddings lowercase__: List[Any] = type_vocab_size lowercase__: Tuple = type_sequence_label_size lowercase__: Optional[int] = initializer_range lowercase__: Dict = num_labels lowercase__: int = num_choices lowercase__: int = scope def _snake_case ( self ): lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: List[Any] = None if self.use_input_mask: lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[Any] = None if self.use_token_type_ids: lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: Optional[Any] = None lowercase__: Any = None lowercase__: str = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase__: Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ): return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = MobileBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: str = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: str = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = self.num_choices lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): lowercase__: Optional[int] = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ): Union[str, Any] = config_and_inputs lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Optional[Any] = True def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ): lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): lowercase__: Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) lowercase__: Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def _snake_case ( self ): lowercase__: int = MobileBertModelTester(self ) lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: return torch.tensor( __UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase , ) __A = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase ) lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): lowercase__: Tuple = model(_UpperCAmelCase )[0] lowercase__: Dict = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor( [ [ [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5], [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0], [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1], ] ] , device=_UpperCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
2
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Dict = DebertaVaTokenizer _UpperCAmelCase :Tuple = DebertaVaTokenizerFast _UpperCAmelCase :int = True _UpperCAmelCase :int = True def _snake_case ( self ): super().setUp() # We have a SentencePiece fixture for testing lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[str] = '''this is a test''' lowercase__: int = '''this is a test''' return input_text, output_text def _snake_case ( self ): lowercase__: Optional[int] = '''<pad>''' lowercase__: Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(_UpperCAmelCase ) , 30001 ) def _snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _snake_case ( self ): # fmt: off lowercase__: int = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass def _snake_case ( self ): # fmt: off lowercase__: Dict = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Any = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.get_tokenizer() lowercase__: List[Any] = self.get_rust_tokenizer() lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.get_rust_tokenizer() lowercase__: str = tokenizer.encode(_UpperCAmelCase ) lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = '''This is a test''' lowercase__: str = [13, 1, 4398, 25, 21, 1289] lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # fmt: off lowercase__: str = '''I was born in 92000, and this is falsé.''' lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase ) lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' ) lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' ) lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , ) @slow def _snake_case ( self ): # fmt: off lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = None _UpperCAmelCase :Optional[int] = BloomTokenizerFast _UpperCAmelCase :List[Any] = BloomTokenizerFast _UpperCAmelCase :Any = True _UpperCAmelCase :str = False _UpperCAmelCase :List[Any] = "tokenizer_file" _UpperCAmelCase :Any = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def _snake_case ( self ): super().setUp() lowercase__: Dict = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Any = self.get_rust_tokenizer() lowercase__: Dict = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] lowercase__: Union[str, Any] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] lowercase__: List[Any] = tokenizer.batch_encode_plus(_UpperCAmelCase )['''input_ids'''] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.batch_decode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__: int = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input lowercase__: Dict = '''This is a simple input''' lowercase__: int = ['''This is a simple input 1''', '''This is a simple input 2'''] lowercase__: int = ('''This is a simple input''', '''This is a pair''') lowercase__: Optional[Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase ) tokenizer_r.encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase ) tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase ) tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase ) tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) lowercase__: Optional[int] = None # Hotfixing padding = None self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Simple input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' ) # Pair input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , ) def _snake_case ( self ): lowercase__: Tuple = self.get_rust_tokenizer() lowercase__: int = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=_UpperCAmelCase ) lowercase__: str = next(iter(_UpperCAmelCase ) )['''premise'''] # pick up one data lowercase__: Any = list(sample_data.values() ) lowercase__: Union[str, Any] = list(map(tokenizer.encode , _UpperCAmelCase ) ) lowercase__: Union[str, Any] = [tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) for x in output_tokens] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
2
"""simple docstring""" import unittest from transformers import DonutProcessor __A = "naver-clova-ix/donut-base" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__: Union[str, Any] = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__: str = self.processor.tokenajson(_UpperCAmelCase ) self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( ) -> int: for n in range(1 , 1_0_0_0_0_0_0 ): yield n * (n + 1) // 2 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]: lowercase__: Optional[int] = 1 lowercase__: List[Any] = 2 while i * i <= n: lowercase__: Any = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def SCREAMING_SNAKE_CASE__ ( ) -> int: return next(i for i in triangle_number_generator() if count_divisors(__UpperCAmelCase ) > 5_0_0 ) if __name__ == "__main__": print(solution())
2
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __A = logging.get_logger(__name__) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): warnings.warn( '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
2
1
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = TransfoXLTokenizer _UpperCAmelCase :List[str] = False _UpperCAmelCase :str = False def _snake_case ( self ): super().setUp() lowercase__: int = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] lowercase__: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _snake_case ( self , **_UpperCAmelCase ): lowercase__: Tuple = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[Any] = '''<unk> UNwanted , running''' lowercase__: Optional[Any] = '''<unk> unwanted, running''' return input_text, output_text def _snake_case ( self ): lowercase__: str = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_UpperCAmelCase ) lowercase__: Dict = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(_UpperCAmelCase , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [0, 4, 8, 7] ) def _snake_case ( self ): lowercase__: Optional[int] = TransfoXLTokenizer(lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def _snake_case ( self ): lowercase__: str = TransfoXLTokenizer(lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _snake_case ( self ): lowercase__: Optional[Any] = TransfoXLTokenizer(lower_case=_UpperCAmelCase ) lowercase__: Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' lowercase__: Optional[Any] = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(tokenizer.convert_tokens_to_string(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[int] = self.get_tokenizer() lowercase__: int = len(_UpperCAmelCase ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(_UpperCAmelCase ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
2
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = 2_5_6 class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = ["melgan"] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): super().__init__() # From MELGAN lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training. lowercase__: Union[str, Any] = 4.0 # Largest value for most examples lowercase__: Union[str, Any] = 128 self.register_modules( notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: int = output_range if clip: lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ): lowercase__, lowercase__: str = input_range lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs # Scale to [0, 1]. lowercase__: Tuple = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[str] = input_tokens > 0 lowercase__, lowercase__: str = self.notes_encoder( encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.continuous_encoder( encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = noise_time if not torch.is_tensor(_UpperCAmelCase ): lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0: lowercase__: str = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__: Union[str, Any] = self.decoder( encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase ) return logits @torch.no_grad() def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(_UpperCAmelCase )}.""" ) lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) for i, encoder_input_tokens in enumerate(_UpperCAmelCase ): if i == 0: lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. lowercase__: Union[str, Any] = ones lowercase__: str = self.scale_features( _UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase ) lowercase__: Dict = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__: int = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__: List[Any] = self.decode( encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] ) lowercase__: Dict = mel[:1] lowercase__: List[Any] = mel.cpu().float().numpy() lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCAmelCase , _UpperCAmelCase ) logger.info('''Generated segment''' , _UpperCAmelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__: Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_UpperCAmelCase )
2
1
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __A = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") __A = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split() __A = "|".join(sys.argv[1:]) __A = re.compile(Rf'''^({joined_dirs}).*?\.py$''') __A = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
2
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __A = logging.get_logger(__name__) __A = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = "bloom" _UpperCAmelCase :List[str] = ["past_key_values"] _UpperCAmelCase :Optional[Any] = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: Any = vocab_size # Backward compatibility with n_embed kwarg lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase ) lowercase__: int = hidden_size if n_embed is None else n_embed lowercase__: int = n_layer lowercase__: int = n_head lowercase__: Optional[Any] = layer_norm_epsilon lowercase__: int = initializer_range lowercase__: List[Any] = use_cache lowercase__: str = pretraining_tp lowercase__: Tuple = apply_residual_connection_post_layernorm lowercase__: int = hidden_dropout lowercase__: Optional[Any] = attention_dropout lowercase__: int = bos_token_id lowercase__: Union[str, Any] = eos_token_id lowercase__: Any = slow_but_exact super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = version.parse("1.12" ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase ) lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: str = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head @property def _snake_case ( self ): return 1e-3 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Tuple = seqlen + 2 lowercase__: str = self._config.hidden_size // self.num_attention_heads lowercase__: Optional[int] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase__: Union[str, Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase__: str = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Tuple = common_inputs['''attention_mask'''] if self.use_past: lowercase__: int = ordered_inputs['''attention_mask'''].dtype lowercase__: List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> bool: return len(set(__UpperCAmelCase ) ) == len(__UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
2
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): lowercase__: Dict = parent lowercase__: Optional[int] = batch_size lowercase__: List[str] = seq_length lowercase__: Optional[int] = is_training lowercase__: Dict = use_input_mask lowercase__: List[Any] = use_token_type_ids lowercase__: List[str] = use_labels lowercase__: Union[str, Any] = vocab_size lowercase__: str = hidden_size lowercase__: Any = embedding_size lowercase__: Any = num_hidden_layers lowercase__: Any = num_attention_heads lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: Optional[int] = max_position_embeddings lowercase__: List[Any] = type_vocab_size lowercase__: Tuple = type_sequence_label_size lowercase__: Optional[int] = initializer_range lowercase__: Dict = num_labels lowercase__: int = num_choices lowercase__: int = scope def _snake_case ( self ): lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: List[Any] = None if self.use_input_mask: lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[Any] = None if self.use_token_type_ids: lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: Optional[Any] = None lowercase__: Any = None lowercase__: str = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase__: Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ): return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = MobileBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) lowercase__: str = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: str = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.num_labels lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Dict = self.num_choices lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__: Optional[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): lowercase__: Optional[int] = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ): Union[str, Any] = config_and_inputs lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Optional[Any] = True def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ): lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): lowercase__: Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) lowercase__: Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def _snake_case ( self ): lowercase__: int = MobileBertModelTester(self ) lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: return torch.tensor( __UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase , ) __A = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase ) lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): lowercase__: Tuple = model(_UpperCAmelCase )[0] lowercase__: Dict = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor( [ [ [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5], [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0], [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1], ] ] , device=_UpperCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
2
1
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __A = logging.get_logger(__name__) # pylint: disable=invalid-name __A = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Union[PIL.Image.Image, np.ndarray] class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): super().__init__() self.register_modules( prior=_UpperCAmelCase , image_encoder=_UpperCAmelCase , image_processor=_UpperCAmelCase , scheduler=_UpperCAmelCase , renderer=_UpperCAmelCase , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if latents is None: lowercase__: Optional[Any] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) lowercase__: Optional[Any] = latents.to(_UpperCAmelCase ) lowercase__: Dict = latents * scheduler.init_noise_sigma return latents def _snake_case ( self , _UpperCAmelCase=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowercase__: str = torch.device(F"""cuda:{gpu_id}""" ) lowercase__: List[Any] = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCAmelCase , _UpperCAmelCase ) @property def _snake_case ( self ): if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_UpperCAmelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(image[0] , torch.Tensor ): lowercase__: List[Any] = torch.cat(_UpperCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCAmelCase , axis=0 ) if not isinstance(_UpperCAmelCase , torch.Tensor ): lowercase__: Optional[int] = self.image_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) lowercase__: str = image.to(dtype=self.image_encoder.dtype , device=_UpperCAmelCase ) lowercase__: str = self.image_encoder(_UpperCAmelCase )['''last_hidden_state'''] lowercase__: Tuple = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 lowercase__: Any = image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: lowercase__: str = torch.zeros_like(_UpperCAmelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__: Dict = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_UpperCAmelCase ) def __call__( self , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = 25 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 4.0 , _UpperCAmelCase = 64 , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , ): if isinstance(_UpperCAmelCase , PIL.Image.Image ): lowercase__: str = 1 elif isinstance(_UpperCAmelCase , torch.Tensor ): lowercase__: int = image.shape[0] elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): lowercase__: int = len(_UpperCAmelCase ) else: raise ValueError( F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCAmelCase )}""" ) lowercase__: Optional[Any] = self._execution_device lowercase__: Dict = batch_size * num_images_per_prompt lowercase__: str = guidance_scale > 1.0 lowercase__: List[Any] = self._encode_image(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # prior self.scheduler.set_timesteps(_UpperCAmelCase , device=_UpperCAmelCase ) lowercase__: Union[str, Any] = self.scheduler.timesteps lowercase__: List[Any] = self.prior.config.num_embeddings lowercase__: List[str] = self.prior.config.embedding_dim lowercase__: int = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim lowercase__: List[Any] = latents.reshape(latents.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ): # expand the latents if we are doing classifier free guidance lowercase__: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__: Optional[int] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.prior( _UpperCAmelCase , timestep=_UpperCAmelCase , proj_embedding=_UpperCAmelCase , ).predicted_image_embedding # remove the variance lowercase__, lowercase__: Optional[int] = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: lowercase__, lowercase__: Optional[int] = noise_pred.chunk(2 ) lowercase__: Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) lowercase__: Optional[int] = self.scheduler.step( _UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_UpperCAmelCase ) lowercase__: Any = [] for i, latent in enumerate(_UpperCAmelCase ): print() lowercase__: Any = self.renderer.decode( latent[None, :] , _UpperCAmelCase , size=_UpperCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_UpperCAmelCase ) lowercase__: Tuple = torch.stack(_UpperCAmelCase ) if output_type not in ["np", "pil"]: raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" ) lowercase__: Tuple = images.cpu().numpy() if output_type == "pil": lowercase__: List[Any] = [self.numpy_to_pil(_UpperCAmelCase ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_UpperCAmelCase )
2
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = "unispeech-sat" def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowercase__: Union[str, Any] = hidden_size lowercase__: Union[str, Any] = feat_extract_norm lowercase__: Any = feat_extract_activation lowercase__: List[Any] = list(_UpperCAmelCase ) lowercase__: Optional[int] = list(_UpperCAmelCase ) lowercase__: int = list(_UpperCAmelCase ) lowercase__: Any = conv_bias lowercase__: List[str] = num_conv_pos_embeddings lowercase__: List[str] = num_conv_pos_embedding_groups lowercase__: int = len(self.conv_dim ) lowercase__: Dict = num_hidden_layers lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: Optional[Any] = num_attention_heads lowercase__: Union[str, Any] = hidden_dropout lowercase__: List[Any] = attention_dropout lowercase__: str = activation_dropout lowercase__: Optional[Any] = feat_proj_dropout lowercase__: Optional[int] = final_dropout lowercase__: Any = layerdrop lowercase__: int = layer_norm_eps lowercase__: Any = initializer_range lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[Any] = num_clusters lowercase__: Dict = do_stable_layer_norm lowercase__: List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__: Dict = apply_spec_augment lowercase__: Union[str, Any] = mask_time_prob lowercase__: List[str] = mask_time_length lowercase__: Union[str, Any] = mask_time_min_masks lowercase__: str = mask_feature_prob lowercase__: Dict = mask_feature_length lowercase__: List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__: Tuple = num_codevectors_per_group lowercase__: Optional[Any] = num_codevector_groups lowercase__: int = contrastive_logits_temperature lowercase__: Any = feat_quantizer_dropout lowercase__: int = num_negatives lowercase__: Optional[Any] = codevector_dim lowercase__: int = proj_codevector_dim lowercase__: str = diversity_loss_weight # ctc loss lowercase__: int = ctc_loss_reduction lowercase__: Union[str, Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__: Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = list(_UpperCAmelCase ) lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
2
1
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model", } } __A = { "albert-base-v1": 5_1_2, "albert-large-v1": 5_1_2, "albert-xlarge-v1": 5_1_2, "albert-xxlarge-v1": 5_1_2, "albert-base-v2": 5_1_2, "albert-large-v2": 5_1_2, "albert-xlarge-v2": 5_1_2, "albert-xxlarge-v2": 5_1_2, } __A = "▁" class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = VOCAB_FILES_NAMES _UpperCAmelCase :Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _UpperCAmelCase , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase = None , **_UpperCAmelCase , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. lowercase__: int = ( AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token ) lowercase__: Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) lowercase__: Optional[int] = do_lower_case lowercase__: str = remove_space lowercase__: int = keep_accents lowercase__: Tuple = vocab_file lowercase__: Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) @property def _snake_case ( self ): return len(self.sp_model ) def _snake_case ( self ): lowercase__: Dict = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): lowercase__: Union[str, Any] = self.__dict__.copy() lowercase__: List[str] = None return state def __setstate__( self , _UpperCAmelCase ): lowercase__: str = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__: int = {} lowercase__: Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , _UpperCAmelCase ): if self.remove_space: lowercase__: Optional[Any] = ''' '''.join(inputs.strip().split() ) else: lowercase__: Tuple = inputs lowercase__: Union[str, Any] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowercase__: Tuple = unicodedata.normalize('''NFKD''' , _UpperCAmelCase ) lowercase__: Optional[int] = ''''''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: lowercase__: List[str] = outputs.lower() return outputs def _snake_case ( self , _UpperCAmelCase ): lowercase__: str = self.preprocess_text(_UpperCAmelCase ) lowercase__: List[str] = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) lowercase__: Tuple = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowercase__: Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase__: Dict = cur_pieces[1:] else: lowercase__: Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def _snake_case ( self , _UpperCAmelCase ): return self.sp_model.PieceToId(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): return self.sp_model.IdToPiece(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Union[str, Any] = [] lowercase__: Optional[int] = '''''' lowercase__: str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCAmelCase ) + token lowercase__: List[str] = True lowercase__: List[str] = [] else: current_sub_tokens.append(_UpperCAmelCase ) lowercase__: Dict = False out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: str = [self.sep_token_id] lowercase__: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[int] = [self.sep_token_id] lowercase__: Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__: Union[str, Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , '''wb''' ) as fi: lowercase__: Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,)
2
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) __A = parser.parse_args() __A = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
2
1
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__: List[str] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__: Optional[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__: Optional[int] = 4 lowercase__: str = 4_8 lowercase__: List[Any] = '''pixelshuffle_aux''' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__: Any = [6, 6, 6, 6] lowercase__: int = 6_0 lowercase__: Tuple = [6, 6, 6, 6] lowercase__: List[str] = '''pixelshuffledirect''' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__: int = 4 lowercase__: Optional[Any] = '''nearest+conv''' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__: Any = 1 lowercase__: int = 1 lowercase__: Optional[Any] = 1_2_6 lowercase__: List[str] = 7 lowercase__: Optional[Any] = 2_5_5.0 lowercase__: List[str] = '''''' return config def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: if "patch_embed.proj" in name and "layers" not in name: lowercase__: str = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowercase__: List[Any] = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' ) if "layers" in name: lowercase__: str = name.replace('''layers''' , '''encoder.stages''' ) if "residual_group.blocks" in name: lowercase__: List[str] = name.replace('''residual_group.blocks''' , '''layers''' ) if "attn.proj" in name: lowercase__: Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowercase__: int = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowercase__: Optional[Any] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowercase__: str = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowercase__: Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowercase__: Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: lowercase__: Any = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: lowercase__: Union[str, Any] = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: lowercase__: int = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: lowercase__: List[Any] = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if "patch_embed.proj" in name: lowercase__: Any = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' ) if name == "norm.weight": lowercase__: str = '''layernorm.weight''' if name == "norm.bias": lowercase__: List[str] = '''layernorm.bias''' if "conv_first" in name: lowercase__: Optional[Any] = name.replace('''conv_first''' , '''first_convolution''' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__: Dict = name.replace('''conv_last''' , '''final_convolution''' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__: Dict = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' ) if "upsample.0" in name: lowercase__: List[str] = name.replace('''upsample.0''' , '''upsample.convolution_0''' ) if "upsample.2" in name: lowercase__: int = name.replace('''upsample.2''' , '''upsample.convolution_1''' ) lowercase__: str = '''upsample.''' + name elif config.upsampler == "pixelshuffledirect": lowercase__: Optional[int] = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' ) lowercase__: Tuple = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' ) else: pass else: lowercase__: Optional[int] = '''swin2sr.''' + name return name def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: for key in orig_state_dict.copy().keys(): lowercase__: Optional[int] = orig_state_dict.pop(__UpperCAmelCase ) if "qkv" in key: lowercase__: Optional[int] = key.split('''.''' ) lowercase__: int = int(key_split[1] ) lowercase__: Any = int(key_split[4] ) lowercase__: Tuple = config.embed_dim if "weight" in key: lowercase__: Optional[Any] = val[:dim, :] lowercase__: Optional[Any] = val[dim : dim * 2, :] lowercase__: List[str] = val[-dim:, :] else: lowercase__: List[Any] = val[:dim] lowercase__: List[str] = val[dim : dim * 2] lowercase__: List[str] = val[-dim:] pass else: lowercase__: Union[str, Any] = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: lowercase__: Dict = get_config(__UpperCAmelCase ) lowercase__: List[Any] = SwinaSRForImageSuperResolution(__UpperCAmelCase ) model.eval() lowercase__: List[Any] = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='''cpu''' ) lowercase__: List[Any] = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: raise ValueError('''Missing keys when converting: {}'''.format(__UpperCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F"""Unexpected key {key} in state_dict""" ) # verify values lowercase__: Tuple = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true''' lowercase__: Union[str, Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert('''RGB''' ) lowercase__: Optional[int] = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__: Optional[Any] = 1_2_6 if '''Jpeg''' in checkpoint_url else 2_5_6 lowercase__: Optional[int] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) lowercase__: Tuple = transforms(__UpperCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: lowercase__: Dict = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__: Dict = model(__UpperCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__: Dict = torch.Size([1, 3, 5_1_2, 5_1_2] ) lowercase__: List[str] = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__: Optional[int] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowercase__: Tuple = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__: Optional[int] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowercase__: str = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__: Optional[int] = torch.Size([1, 3, 5_1_2, 5_1_2] ) lowercase__: Any = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__: Tuple = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowercase__: Tuple = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __UpperCAmelCase , atol=1e-3 ) print('''Looks ok!''' ) lowercase__: List[Any] = { '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': ( '''swin2SR-classical-sr-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': ( '''swin2SR-classical-sr-x4-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': ( '''swin2SR-compressed-sr-x4-48''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': ( '''swin2SR-lightweight-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': ( '''swin2SR-realworld-sr-x4-64-bsrgan-psnr''' ), } lowercase__: Dict = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: model.push_to_hub(F"""caidas/{model_name}""" ) processor.push_to_hub(F"""caidas/{model_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") __A = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
2
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A = logging.get_logger(__name__) __A = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = "codegen" _UpperCAmelCase :Optional[int] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: int = vocab_size lowercase__: str = n_ctx lowercase__: List[Any] = n_positions lowercase__: Union[str, Any] = n_embd lowercase__: Optional[Any] = n_layer lowercase__: str = n_head lowercase__: List[Any] = n_inner lowercase__: Union[str, Any] = rotary_dim lowercase__: Optional[Any] = activation_function lowercase__: Union[str, Any] = resid_pdrop lowercase__: Optional[int] = embd_pdrop lowercase__: Optional[Any] = attn_pdrop lowercase__: Optional[int] = layer_norm_epsilon lowercase__: List[Any] = initializer_range lowercase__: Tuple = use_cache lowercase__: Any = bos_token_id lowercase__: Any = eos_token_id super().__init__( bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' ) lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Any = seqlen + 2 lowercase__: List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__: Optional[Any] = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Optional[Any] = common_inputs['''attention_mask'''] if self.use_past: lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype lowercase__: List[Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
"""simple docstring""" # Lint as: python3 import itertools import os import re __A = re.compile(R"([A-Z]+)([A-Z][a-z])") __A = re.compile(R"([a-z\d])([A-Z])") __A = re.compile(R"(?<!_)_(?!_)") __A = re.compile(R"(_{2,})") __A = R"^\w+(\.\w+)*$" __A = R"<>:/\|?*" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: lowercase__: Optional[int] = _uppercase_uppercase_re.sub(R'''\1_\2''' , __UpperCAmelCase ) lowercase__: int = _lowercase_uppercase_re.sub(R'''\1_\2''' , __UpperCAmelCase ) return name.lower() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]: lowercase__: List[str] = _single_underscore_re.split(__UpperCAmelCase ) lowercase__: Tuple = [_multiple_underscores_re.split(__UpperCAmelCase ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__UpperCAmelCase ) if n != '''''' ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: if os.path.basename(__UpperCAmelCase ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) return camelcase_to_snakecase(__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: if os.path.basename(__UpperCAmelCase ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) if not re.match(_split_re , __UpperCAmelCase ): raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" ) return F"""{filename_prefix_for_name(__UpperCAmelCase )}-{split}""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[int]: lowercase__: List[str] = filename_prefix_for_split(__UpperCAmelCase , __UpperCAmelCase ) if filetype_suffix: prefix += F""".{filetype_suffix}""" lowercase__: int = os.path.join(__UpperCAmelCase , __UpperCAmelCase ) return F"""{filepath}*""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> List[str]: lowercase__: Optional[int] = filename_prefix_for_split(__UpperCAmelCase , __UpperCAmelCase ) lowercase__: Dict = os.path.join(__UpperCAmelCase , __UpperCAmelCase ) if shard_lengths: lowercase__: str = len(__UpperCAmelCase ) lowercase__: Optional[int] = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(__UpperCAmelCase )] if filetype_suffix: lowercase__: Dict = [filename + F""".{filetype_suffix}""" for filename in filenames] return filenames else: lowercase__: Tuple = prefix if filetype_suffix: filename += F""".{filetype_suffix}""" return [filename]
2
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCAmelCase : """simple docstring""" _UpperCAmelCase :str = field( metadata={"help": "The output directory where the model will be written."} ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } ,) _UpperCAmelCase :str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } ,) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) _UpperCAmelCase :Optional[str] = field( default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: lowercase__: Dict = HfArgumentParser((ModelArguments,) ) ((lowercase__), ): List[str] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowercase__: Tuple = True lowercase__: int = True lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowercase__: int = decoder_config.decoder_start_token_id lowercase__: Tuple = decoder_config.pad_token_id if decoder_start_token_id is None: lowercase__: Tuple = decoder_config.bos_token_id if pad_token_id is None: lowercase__: Optional[int] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowercase__: Optional[Any] = decoder_config.eos_token_id lowercase__: Tuple = decoder_start_token_id lowercase__: Dict = pad_token_id lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
2
1
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: lowercase__: Union[str, Any] = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' lowercase__: Dict = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert('''RGB''' ) return image def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]: lowercase__: int = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: lowercase__: Optional[int] = dct.pop(__UpperCAmelCase ) lowercase__: str = val def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowercase__: List[str] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) lowercase__: Optional[int] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict lowercase__: Union[str, Any] = torch.cat((q_bias, torch.zeros_like(__UpperCAmelCase , requires_grad=__UpperCAmelCase ), v_bias) ) lowercase__: int = qkv_bias def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: lowercase__: List[str] = 3_6_4 if '''coco''' in model_name else 2_2_4 lowercase__: Tuple = BlipaVisionConfig(image_size=__UpperCAmelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: lowercase__: List[Any] = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__UpperCAmelCase ).to_dict() elif "opt-6.7b" in model_name: lowercase__: Any = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__UpperCAmelCase ).to_dict() elif "t5-xl" in model_name: lowercase__: Any = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowercase__: List[Any] = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() lowercase__: Tuple = BlipaConfig(vision_config=__UpperCAmelCase , text_config=__UpperCAmelCase ) return config, image_size @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int: lowercase__: Tuple = ( AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' ) ) lowercase__: List[Any] = tokenizer('''\n''' , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowercase__, lowercase__: str = get_blipa_config(__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) lowercase__: Dict = BlipaForConditionalGeneration(__UpperCAmelCase ).eval() lowercase__: Union[str, Any] = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } lowercase__, lowercase__: int = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowercase__: Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu''' lowercase__, lowercase__, lowercase__: List[Any] = load_model_and_preprocess( name=__UpperCAmelCase , model_type=__UpperCAmelCase , is_eval=__UpperCAmelCase , device=__UpperCAmelCase ) original_model.eval() print('''Done!''' ) # update state dict keys lowercase__: Union[str, Any] = original_model.state_dict() lowercase__: List[Any] = create_rename_keys(__UpperCAmelCase ) for src, dest in rename_keys: rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowercase__: Optional[Any] = state_dict.pop(__UpperCAmelCase ) if key.startswith('''Qformer.bert''' ): lowercase__: List[str] = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowercase__: Union[str, Any] = key.replace('''self''' , '''attention''' ) if "opt_proj" in key: lowercase__: Union[str, Any] = key.replace('''opt_proj''' , '''language_projection''' ) if "t5_proj" in key: lowercase__: Any = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''opt''' ): lowercase__: Optional[int] = key.replace('''opt''' , '''language''' ) if key.startswith('''t5''' ): lowercase__: int = key.replace('''t5''' , '''language''' ) lowercase__: List[Any] = val # read in qv biases read_in_q_v_bias(__UpperCAmelCase , __UpperCAmelCase ) lowercase__, lowercase__: Any = hf_model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) assert len(__UpperCAmelCase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] lowercase__: Optional[int] = load_demo_image() lowercase__: List[Any] = vis_processors['''eval'''](__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase ) lowercase__: int = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__UpperCAmelCase ) # create processor lowercase__: Any = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase ) lowercase__: Union[str, Any] = BlipaProcessor(image_processor=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) lowercase__: str = processor(images=__UpperCAmelCase , return_tensors='''pt''' ).pixel_values.to(__UpperCAmelCase ) # make sure processor creates exact same pixel values assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) original_model.to(__UpperCAmelCase ) hf_model.to(__UpperCAmelCase ) with torch.no_grad(): if "opt" in model_name: lowercase__: List[Any] = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits lowercase__: Tuple = hf_model(__UpperCAmelCase , __UpperCAmelCase ).logits else: lowercase__: Tuple = original_model( {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits lowercase__: Optional[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 ) lowercase__: List[str] = hf_model(__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase ).logits assert original_logits.shape == logits.shape print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": lowercase__: str = torch.tensor( [[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__UpperCAmelCase ) assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": lowercase__: Dict = torch.tensor( [[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__UpperCAmelCase ) else: # cast to same type lowercase__: str = logits.dtype assert torch.allclose(original_logits.to(__UpperCAmelCase ) , __UpperCAmelCase , atol=1e-2 ) print('''Looks ok!''' ) print('''Generating a caption...''' ) lowercase__: str = '''''' lowercase__: Optional[int] = tokenizer(__UpperCAmelCase , return_tensors='''pt''' ).input_ids.to(__UpperCAmelCase ) lowercase__: Optional[Any] = original_model.generate({'''image''': original_pixel_values} ) lowercase__: Any = hf_model.generate( __UpperCAmelCase , __UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('''Original generation:''' , __UpperCAmelCase ) lowercase__: Optional[Any] = input_ids.shape[1] lowercase__: Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__UpperCAmelCase ) lowercase__: List[Any] = [text.strip() for text in output_text] print('''HF generation:''' , __UpperCAmelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__UpperCAmelCase ) hf_model.save_pretrained(__UpperCAmelCase ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() __A = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __A = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "ctrl" _UpperCAmelCase :int = ["past_key_values"] _UpperCAmelCase :Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ): lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[int] = n_positions lowercase__: Optional[int] = n_embd lowercase__: Any = n_layer lowercase__: Any = n_head lowercase__: int = dff lowercase__: Dict = resid_pdrop lowercase__: Any = embd_pdrop lowercase__: Any = layer_norm_epsilon lowercase__: Optional[int] = initializer_range lowercase__: Dict = use_cache super().__init__(**_UpperCAmelCase )
2
1
"""simple docstring""" __A = { "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/" } # Exclamation mark is not in ITU-R recommendation # fmt: on __A = {value: key for key, value in MORSE_CODE_DICT.items()} def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: return "".join(REVERSE_DICT[char] for char in message.split() ) def SCREAMING_SNAKE_CASE__ ( ) -> None: lowercase__: Any = '''Morse code here!''' print(__UpperCAmelCase ) lowercase__: List[Any] = encrypt(__UpperCAmelCase ) print(__UpperCAmelCase ) lowercase__: Union[str, Any] = decrypt(__UpperCAmelCase ) print(__UpperCAmelCase ) if __name__ == "__main__": main()
2
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int: lowercase__: str = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
2
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: lowercase__: int = '''''' for word_or_phrase in separated: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(__UpperCAmelCase ) if __name__ == "__main__": from doctest import testmod testmod()
2
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ): lowercase__: int = bp_numa lowercase__: Union[str, Any] = bp_numa lowercase__: List[str] = bp_numa lowercase__: str = conva_get[:2] lowercase__: Union[str, Any] = conva_get[2] lowercase__: Any = size_pa lowercase__: Optional[Any] = rate_w lowercase__: Tuple = rate_t lowercase__: List[str] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1 def _snake_case ( self , _UpperCAmelCase ): # save model dict with pickle lowercase__: int = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_UpperCAmelCase , '''wb''' ) as f: pickle.dump(_UpperCAmelCase , _UpperCAmelCase ) print(F"""Model saved: {save_path}""" ) @classmethod def _snake_case ( cls , _UpperCAmelCase ): # read saved model with open(_UpperCAmelCase , '''rb''' ) as f: lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301 lowercase__: Tuple = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) lowercase__: Any = model_dic.get('''size_pooling1''' ) lowercase__: int = model_dic.get('''num_bp1''' ) lowercase__: Optional[int] = model_dic.get('''num_bp2''' ) lowercase__: str = model_dic.get('''num_bp3''' ) lowercase__: Any = model_dic.get('''rate_weight''' ) lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' ) # create model instance lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # modify model parameter lowercase__: Dict = model_dic.get('''w_conv1''' ) lowercase__: Dict = model_dic.get('''wkj''' ) lowercase__: str = model_dic.get('''vji''' ) lowercase__: List[Any] = model_dic.get('''thre_conv1''' ) lowercase__: Optional[int] = model_dic.get('''thre_bp2''' ) lowercase__: Tuple = model_dic.get('''thre_bp3''' ) return conv_ins def _snake_case ( self , _UpperCAmelCase ): return 1 / (1 + np.exp(-1 * x )) def _snake_case ( self , _UpperCAmelCase ): return round(_UpperCAmelCase , 3 ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # convolution process lowercase__: Any = convs[0] lowercase__: Tuple = convs[1] lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0] # get the data slice of original image data, data_focus lowercase__: List[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ): lowercase__: Tuple = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_UpperCAmelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__: Optional[int] = [] lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_UpperCAmelCase ): lowercase__: str = [] for i_focus in range(len(_UpperCAmelCase ) ): lowercase__: Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape( _UpperCAmelCase , _UpperCAmelCase ) data_featuremap.append(_UpperCAmelCase ) # expanding the data slice to One dimenssion lowercase__: Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) ) lowercase__: Any = np.asarray(_UpperCAmelCase ) return focus_list, data_featuremap def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ): # pooling process lowercase__: List[Any] = len(featuremaps[0] ) lowercase__: Any = int(size_map / size_pooling ) lowercase__: List[Any] = [] for i_map in range(len(_UpperCAmelCase ) ): lowercase__: Any = featuremaps[i_map] lowercase__: Tuple = [] for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_UpperCAmelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_UpperCAmelCase ) ) lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ) featuremap_pooled.append(_UpperCAmelCase ) return featuremap_pooled def _snake_case ( self , _UpperCAmelCase ): # expanding three dimension data to one dimension list lowercase__: Optional[Any] = [] for i in range(len(_UpperCAmelCase ) ): lowercase__: Any = np.shape(data[i] ) lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) lowercase__: List[str] = data_listed.getA().tolist()[0] data_expanded.extend(_UpperCAmelCase ) lowercase__: List[str] = np.asarray(_UpperCAmelCase ) return data_expanded def _snake_case ( self , _UpperCAmelCase ): # expanding matrix to one dimension list lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase ) lowercase__: List[str] = np.shape(_UpperCAmelCase ) lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = [] lowercase__: List[str] = 0 for i_map in range(_UpperCAmelCase ): lowercase__: Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[Any] = pd_pool[ i_pool ] lowercase__: List[Any] = i_pool + 1 lowercase__: str = np.multiply( _UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_UpperCAmelCase ) return pd_all def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) ) print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) ) lowercase__: Tuple = 0 lowercase__: Tuple = [] lowercase__: Optional[int] = 10000 while rp < n_repeat and mse >= error_accuracy: lowercase__: Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(_UpperCAmelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__: List[Any] = np.asmatrix(datas_train[p] ) lowercase__: Optional[int] = np.asarray(datas_teach[p] ) lowercase__, lowercase__: List[str] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: int = np.shape(_UpperCAmelCase ) lowercase__: Optional[Any] = self._expand(_UpperCAmelCase ) lowercase__: Any = data_bp_input lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa lowercase__: str = self.sig(_UpperCAmelCase ) lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa lowercase__: Dict = self.sig(_UpperCAmelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__: str = np.multiply( (data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: str = np.multiply( np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) ) lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji ) lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__: List[str] = pd_conva_pooled.T.getA().tolist() lowercase__: Optional[Any] = self._calculate_gradient_from_pool( _UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__: str = self._expand_mat(pd_conva_all[k_conv] ) lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__: List[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__: str = rp + 1 lowercase__: Optional[Any] = error_count / patterns all_mse.append(_UpperCAmelCase ) def draw_error(): lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_UpperCAmelCase , '''+-''' ) plt.plot(_UpperCAmelCase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_UpperCAmelCase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def _snake_case ( self , _UpperCAmelCase ): # model predict lowercase__: Union[str, Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) ) for p in range(len(_UpperCAmelCase ) ): lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] ) lowercase__, lowercase__: Any = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga ) lowercase__: str = self._expand(_UpperCAmelCase ) lowercase__: List[Any] = data_bp_input lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa lowercase__: Any = self.sig(_UpperCAmelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out] return np.asarray(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # return the data of image after convoluting process so we can check it out lowercase__: int = np.asmatrix(_UpperCAmelCase ) lowercase__, lowercase__: Optional[int] = self.convolute( _UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
2
1
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __A = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: lowercase__: Optional[int] = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=__UpperCAmelCase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=__UpperCAmelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=__UpperCAmelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=__UpperCAmelCase , default=1_0_0_0 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=__UpperCAmelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=__UpperCAmelCase , default=5_1_2 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=__UpperCAmelCase , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) lowercase__: List[Any] = parser.parse_args() return args def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any: def fn(__UpperCAmelCase ): return tokenizer(examples['''text'''] ) return fn def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict: lowercase__: Dict = [] for i in range(len(tokenized_data['''input_ids'''] ) ): lowercase__: int = { '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } lowercase__: List[str] = tf.train.Features(feature=__UpperCAmelCase ) lowercase__: str = tf.train.Example(features=__UpperCAmelCase ) lowercase__: List[str] = example.SerializeToString() records.append(__UpperCAmelCase ) return records def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]: lowercase__: Any = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: lowercase__: Union[str, Any] = min(len(__UpperCAmelCase ) , args.limit ) lowercase__: Optional[int] = dataset.select(range(__UpperCAmelCase ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) lowercase__: Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowercase__: Union[str, Any] = os.path.join(args.output_dir , args.split ) if not os.path.exists(__UpperCAmelCase ): os.makedirs(__UpperCAmelCase ) else: lowercase__: List[str] = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. lowercase__: str = tokenize_function(__UpperCAmelCase ) lowercase__: Tuple = dataset.map(__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(__UpperCAmelCase ): # Concatenate all texts. lowercase__: Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()} lowercase__: List[str] = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowercase__: Optional[int] = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. lowercase__: Optional[int] = { k: [t[i : i + args.max_length] for i in range(0 , __UpperCAmelCase , args.max_length )] for k, t in concatenated_examples.items() } return result lowercase__: Union[str, Any] = dataset_tokenized.map(__UpperCAmelCase , batched=__UpperCAmelCase , batch_size=1_0_0_0 , num_proc=4 ) lowercase__: int = 0 lowercase__: List[Any] = 0 for shard in range(0 , len(__UpperCAmelCase ) , args.shard_size ): lowercase__: Any = grouped_dataset[shard : shard + args.shard_size] lowercase__: Tuple = len(dataset_snapshot['''input_ids'''] ) lowercase__: Any = os.path.join(__UpperCAmelCase , F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) lowercase__: int = get_serialized_examples(__UpperCAmelCase ) with tf.io.TFRecordWriter(__UpperCAmelCase ) as out_file: for i in range(len(__UpperCAmelCase ) ): lowercase__: Tuple = serialized_examples[i] out_file.write(__UpperCAmelCase ) print('''Wrote file {} containing {} records'''.format(__UpperCAmelCase , __UpperCAmelCase ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""" , '''w''' ) as f: print(F"""Total {args.split} records: {total_records}""" , file=__UpperCAmelCase ) if __name__ == "__main__": __A = parse_args() main(args)
2
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = CTRLTokenizer _UpperCAmelCase :Any = False _UpperCAmelCase :List[Any] = False def _snake_case ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_UpperCAmelCase ) ) def _snake_case ( self , **_UpperCAmelCase ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Optional[int] = '''adapt react readapt apt''' return input_text, output_text def _snake_case ( self ): lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__: Optional[int] = '''adapt react readapt apt''' lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = tokens + [tokenizer.unk_token] lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
2
1
"""simple docstring""" import os from datetime import datetime as dt from github import Github __A = [ "good first issue", "feature request", "wip", ] def SCREAMING_SNAKE_CASE__ ( ) -> Any: lowercase__: str = Github(os.environ['''GITHUB_TOKEN'''] ) lowercase__: Tuple = g.get_repo('''huggingface/accelerate''' ) lowercase__: Optional[int] = repo.get_issues(state='''open''' ) for issue in open_issues: lowercase__: Optional[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __UpperCAmelCase : i.created_at , reverse=__UpperCAmelCase ) lowercase__: Optional[Any] = comments[0] if len(__UpperCAmelCase ) > 0 else None lowercase__: str = dt.utcnow() lowercase__: Tuple = (current_time - issue.updated_at).days lowercase__: Union[str, Any] = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state='''closed''' ) elif ( days_since_updated > 2_3 and days_since_creation >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
2
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger __A = "<<<<<<< This should probably be modified because it mentions: " __A = "=======\n>>>>>>>\n" __A = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] __A = [ # (pattern, replacement) # Order is important here for some replacements (R"tfds\.core", R"datasets"), (R"tf\.io\.gfile\.GFile", R"open"), (R"tf\.([\w\d]+)", R"datasets.Value('\1')"), (R"tfds\.features\.Text\(\)", R"datasets.Value('string')"), (R"tfds\.features\.Text\(", R"datasets.Value('string'),"), (R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("), (R"tfds\.features\.FeaturesDict\(", R"dict("), (R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (R"tfds\.", R"datasets."), (R"dl_manager\.manual_dir", R"self.config.data_dir"), (R"self\.builder_config", R"self.config"), ] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: return ConvertCommand(args.tfds_path , args.datasets_directory ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" @staticmethod def _snake_case ( _UpperCAmelCase ): lowercase__: int = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ): lowercase__: List[str] = get_logger('''datasets-cli/converting''' ) lowercase__: Optional[Any] = tfds_path lowercase__: Dict = datasets_directory def _snake_case ( self ): if os.path.isdir(self._tfds_path ): lowercase__: Optional[Any] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__: Optional[int] = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' ) lowercase__: int = os.path.abspath(self._datasets_directory ) self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) lowercase__: Tuple = [] lowercase__: Dict = [] lowercase__: Any = {} if os.path.isdir(self._tfds_path ): lowercase__: Dict = os.listdir(_UpperCAmelCase ) else: lowercase__: Dict = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"""Looking at file {f_name}""" ) lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(_UpperCAmelCase , encoding='''utf-8''' ) as f: lowercase__: Tuple = f.readlines() lowercase__: Optional[Any] = [] lowercase__: Dict = False lowercase__: List[str] = False lowercase__: List[Any] = [] for line in lines: lowercase__: List[str] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__: Optional[int] = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here lowercase__: Dict = '''''' continue elif "from absl import logging" in out_line: lowercase__: Tuple = '''from datasets import logging\n''' elif "getLogger" in out_line: lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__: Any = True lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' ) out_lines.append(_UpperCAmelCase ) out_lines.append(_UpperCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) lowercase__: List[str] = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__: Optional[Any] = True out_lines.append(_UpperCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__: Dict = f_name.replace('''.py''' , '''''' ) lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) self._logger.info(F"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_UpperCAmelCase ) if needs_manual_update: with_manual_update.append(_UpperCAmelCase ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.writelines(_UpperCAmelCase ) self._logger.info(F"""Converted in {output_file}""" ) for utils_file in utils_files: try: lowercase__: str = os.path.basename(_UpperCAmelCase ) lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(_UpperCAmelCase , _UpperCAmelCase ) except KeyError: self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
2
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]: from transformers.testing_utils import pytest_terminal_summary_main lowercase__: Any = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__UpperCAmelCase , id=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: lowercase__: List[Any] = 0 # Doctest custom flag to ignore output. __A = doctest.register_optionflag("IGNORE_RESULT") __A = doctest.OutputChecker class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __A = CustomOutputChecker __A = HfDoctestModule __A = HfDocTestParser
2
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Tuple = "cvt" def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Dict = num_channels lowercase__: str = patch_sizes lowercase__: Optional[Any] = patch_stride lowercase__: List[str] = patch_padding lowercase__: Optional[Any] = embed_dim lowercase__: Optional[int] = num_heads lowercase__: Any = depth lowercase__: str = mlp_ratio lowercase__: Any = attention_drop_rate lowercase__: Any = drop_rate lowercase__: Optional[Any] = drop_path_rate lowercase__: Dict = qkv_bias lowercase__: Dict = cls_token lowercase__: Any = qkv_projection_method lowercase__: List[str] = kernel_qkv lowercase__: Union[str, Any] = padding_kv lowercase__: Optional[int] = stride_kv lowercase__: int = padding_q lowercase__: Dict = stride_q lowercase__: Any = initializer_range lowercase__: Union[str, Any] = layer_norm_eps
2
1
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __A = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __A = {"facebook/blenderbot-3B": 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: lowercase__: str = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowercase__: Tuple = bs[:] lowercase__: Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCAmelCase ) cs.append(2**8 + n ) n += 1 lowercase__: Dict = [chr(__UpperCAmelCase ) for n in cs] return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict: lowercase__: Dict = set() lowercase__: Dict = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__: Tuple = char return pairs class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = VOCAB_FILES_NAMES _UpperCAmelCase :int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :int = ["input_ids", "attention_mask"] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token lowercase__: str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token lowercase__: Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token lowercase__: Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token lowercase__: Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token lowercase__: str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowercase__: Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token super().__init__( errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) with open(_UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle: lowercase__: Any = json.load(_UpperCAmelCase ) lowercase__: Union[str, Any] = {v: k for k, v in self.encoder.items()} lowercase__: List[str] = errors # how to handle errors in decoding lowercase__: Optional[int] = bytes_to_unicode() lowercase__: Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(_UpperCAmelCase , encoding='''utf-8''' ) as merges_handle: lowercase__: List[Any] = merges_handle.read().split('''\n''' )[1:-1] lowercase__: Dict = [tuple(merge.split() ) for merge in bpe_merges] lowercase__: List[str] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__: Union[str, Any] = {} lowercase__: List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase__: Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self ): return len(self.encoder ) def _snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self , _UpperCAmelCase ): if token in self.cache: return self.cache[token] lowercase__: Any = tuple(_UpperCAmelCase ) lowercase__: Optional[int] = get_pairs(_UpperCAmelCase ) if not pairs: return token while True: lowercase__: str = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowercase__, lowercase__: str = bigram lowercase__: str = [] lowercase__: Optional[Any] = 0 while i < len(_UpperCAmelCase ): try: lowercase__: Any = word.index(_UpperCAmelCase , _UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__: Tuple = j if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__: int = tuple(_UpperCAmelCase ) lowercase__: List[Any] = new_word if len(_UpperCAmelCase ) == 1: break else: lowercase__: Tuple = get_pairs(_UpperCAmelCase ) lowercase__: str = ''' '''.join(_UpperCAmelCase ) lowercase__: Dict = word return word def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[Any] = [] for token in re.findall(self.pat , _UpperCAmelCase ): lowercase__: Optional[Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(''' ''' ) ) return bpe_tokens def _snake_case ( self , _UpperCAmelCase ): return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self , _UpperCAmelCase ): return self.decoder.get(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[str] = ''''''.join(_UpperCAmelCase ) lowercase__: int = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__: str = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '''\n''' ) lowercase__: str = 0 with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) lowercase__: str = token_index writer.write(''' '''.join(_UpperCAmelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[int] = [self.sep_token_id] lowercase__: List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False , **_UpperCAmelCase ): lowercase__: Optional[Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()): lowercase__: Any = ''' ''' + text return (text, kwargs) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self , _UpperCAmelCase ): lowercase__: Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(_UpperCAmelCase ) lowercase__: Optional[int] = ''' '''.join(_UpperCAmelCase ) lowercase__: str = self.encode(_UpperCAmelCase ) if len(_UpperCAmelCase ) > self.model_max_length: lowercase__: Dict = input_ids[-self.model_max_length :] logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
2
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = "rag" _UpperCAmelCase :List[Any] = True def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' ) lowercase__: Any = question_encoder_config.pop('''model_type''' ) lowercase__: Tuple = kwargs.pop('''generator''' ) lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) lowercase__: str = reduce_loss lowercase__: str = label_smoothing lowercase__: Dict = exclude_bos_score lowercase__: Any = do_marginalize lowercase__: Optional[int] = title_sep lowercase__: Any = doc_sep lowercase__: Any = n_docs lowercase__: List[Any] = max_combined_length lowercase__: int = dataset lowercase__: int = dataset_split lowercase__: str = index_name lowercase__: Dict = retrieval_vector_size lowercase__: Dict = retrieval_batch_size lowercase__: List[str] = passages_path lowercase__: str = index_path lowercase__: Optional[Any] = use_dummy_dataset lowercase__: str = output_retrieved lowercase__: List[str] = do_deduplication lowercase__: List[Any] = use_cache if self.forced_eos_token_id is None: lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase ) @classmethod def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase ) def _snake_case ( self ): lowercase__: List[str] = copy.deepcopy(self.__dict__ ) lowercase__: str = self.question_encoder.to_dict() lowercase__: str = self.generator.to_dict() lowercase__: str = self.__class__.model_type return output
2
1