import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path: str, pytorch_dump_folder_path: str):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
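
# Example invocation (a sketch with hypothetical paths and script name; the
# script writes WEIGHTS_NAME ("pytorch_model.bin"), CONFIG_NAME ("config.json")
# and the XLM vocab file into the dump folder):
#
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path /path/to/mlm_tlm_xnli15_1024.pth \
#       --pytorch_dump_folder_path /path/to/converted-xlm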
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
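
# With the lazy module in place, importing the package stays cheap: submodules
# such as `modeling_xglm` are only imported when one of the names registered in
# `_import_structure` is first accessed (an assumed usage sketch):
#
#   from transformers import XGLMConfig        # triggers configuration_xglm only
#   from transformers import XGLMForCausalLM   # triggers modeling_xglm (needs torch)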
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
def actual_power(a: int, b: int) -> int:
    """
    Computes a**b for non-negative b by divide and conquer:
    the exponent is halved at every recursive call.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handles negative exponents by inverting the positive power."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
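
# A few worked values (a sketch, not part of the original script). The
# divide-and-conquer recursion halves the exponent each call, so a**b needs
# O(log b) levels of recursion:
#
#   power(2, 10)  -> 1024     (exponent path: 10 -> 5 -> 2 -> 1 -> 0)
#   power(2, -1)  -> 0.5      (negative exponents reduce to 1 / actual_power(a, -b))
#   power(-2, -3) -> -0.125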
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
def max_product_subarray(numbers: list[int]) -> int:
    """
    Returns the maximum product that can be obtained by multiplying a
    contiguous subarray of the given integer list `numbers`.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
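
# Worked example (illustrative): for numbers = [2, 3, -2, 4] the tracked values
# evolve as below, so max_product_subarray([2, 3, -2, 4]) == 6 (subarray [2, 3]).
# Tracking the minimum product alongside the maximum is what lets a later
# negative number flip a large negative running product into a large positive one.
#
#   number | max_till_now | min_till_now | max_prod
#      3   |      6       |      3       |    6
#     -2   |     -2       |    -12       |    6
#      4   |      4       |    -48       |    6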
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Counts all n-digit positive integers that are also an nth power
    (e.g. 16807 = 7**5 is a five-digit fifth power).
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
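
# Why these defaults suffice (sketch): a base of 10 or more never qualifies,
# since 10**n already has n + 1 digits, and for bases up to 9 the digit count
# eventually falls behind the exponent -- len(str(9**21)) == 21 but
# len(str(9**22)) == 21 as well, so no power of 22 or more can match. Hence
# bases range(1, 10) and powers range(1, 22) cover every solution.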
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Creates a ConvertCommand from the parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
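
# Minimal usage sketch (assumed, not part of the original module): the defaults
# above mirror microsoft/biogpt, and PretrainedConfig supplies the JSON
# serialization helpers.
#
#   config = BioGptConfig(num_hidden_layers=12, hidden_size=768)
#   config.save_pretrained("./biogpt-small")              # writes config.json
#   config = BioGptConfig.from_pretrained("./biogpt-small")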
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        # DeBERTa-v2 ships with type_vocab_size == 0 by default, so the generic
        # dummy inputs may contain token_type_ids the model does not accept.
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
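
# Usage sketch (assumed, not part of the original module): an OnnxConfig like
# this one is typically passed to transformers.onnx.export, which calls
# generate_dummy_inputs() to trace the model.
#
#   from pathlib import Path
#   from transformers.onnx import export
#
#   onnx_config = DebertaV2OnnxConfig(model.config, task="sequence-classification")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("deberta.onnx"))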
import sys


def matrix_chain_order(array):
    """
    Dynamic-programming solution to the matrix chain multiplication problem:
    returns the minimal-cost table `matrix` and the split-point table `sol`.
    """
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively prints the optimal parenthesization using the split table."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
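
# Expected output for this array (the classic CLRS example):
#
#   No. of Operation required: 15125
#   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )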
"""K-Means clustering implemented with the TensorFlow 1.x graph API."""

from random import shuffle

import tensorflow as tf
from numpy import array


def tf_k_means_cluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    `vectors` should be an n*k 2-D array where n is the number of vectors of
    dimensionality k; `noofclusters` is the number of clusters to form.
    """

    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
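
# Usage sketch (assumed; note this relies on the TF1-style graph API, so recent
# TensorFlow versions would need tf.compat.v1 and tf.subtract in place of the
# long-deprecated tf.sub):
#
#   import numpy as np
#   data = np.random.rand(50, 2)
#   centroids, assignments = tf_k_means_cluster(data, noofclusters=3)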
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Counts the primes below `max_prime` that can be written as the
    difference of two consecutive cubes.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
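
# Why the candidate sequence 7, 19, 37, 61, ... works: the differences of
# consecutive cubes are (n + 1)**3 - n**3 = 3n**2 + 3n + 1, and the gap between
# successive candidates is 6(n + 1), which is exactly what
# `prime_candidate += 6 * cube_index` adds after `cube_index` is incremented.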
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """
    Secant-method root finding: `function` is the f whose root we want,
    and `x0` and `x1` are the two starting points of the iteration.
    """
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
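
# Note on the demo call (a sketch): f(x) = x**3 - 2x - 5 is the classic test
# function for root finders; starting from x0 = 3 and x1 = 3.5 the secant
# iteration converges to the real root near x = 2.0945514.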
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
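# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the chunked
# feed-forward branch above trades speed for memory by running the FF over
# slices of one dimension and concatenating the results. A minimal
# standalone version of the same idea, with hypothetical names:
import torch
import torch.nn as nn

def _chunked_feed_forward(ff: nn.Module, x: torch.Tensor, chunk_size: int, dim: int = 1) -> torch.Tensor:
    if x.shape[dim] % chunk_size != 0:
        raise ValueError(f"dimension {x.shape[dim]} is not divisible by chunk size {chunk_size}")
    num_chunks = x.shape[dim] // chunk_size
    # each slice goes through the FF separately, so peak activation memory shrinks
    return torch.cat([ff(piece) for piece in x.chunk(num_chunks, dim=dim)], dim=dim)

# e.g. a (2, 8, 16) sequence processed two tokens at a time:
# _chunked_feed_forward(nn.Linear(16, 16), torch.randn(2, 8, 16), chunk_size=2)
# ---------------------------------------------------------------------------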
class FeedForward( nn.Module ):
    """A feed-forward layer: project in, optional dropout, project out."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = 'geglu',
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == 'gelu':
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == 'gelu-approximate':
            act_fn = GELU(dim, inner_dim, approximate='tanh')
        elif activation_fn == 'geglu':
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == 'geglu-approximate':
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU( nn.Module ):
    """GELU activation with an input projection and optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = 'none'):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU( nn.Module ):
    """Gated GELU: projects to twice the width and gates one half with gelu of the other."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
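# Illustrative sketch (not part of the original file): GEGLU projects to
# 2 * dim_out and uses one half to gate the other, out = hidden * gelu(gate).
# A quick shape-level check of that split with throwaway tensors:
# >>> import torch.nn.functional as F
# >>> _x = torch.randn(2, 4, 16)              # (batch, seq, dim_out * 2)
# >>> _hidden, _gate = _x.chunk(2, dim=-1)    # two (2, 4, 8) halves
# >>> (_hidden * F.gelu(_gate)).shape
# torch.Size([2, 4, 8])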
class ApproximateGELU( nn.Module ):
    """Approximate GELU, x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm( nn.Module ):
    """Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero( nn.Module ):
    """Adaptive layer norm zero (adaLN-Zero): predicts shift/scale/gate chunks."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm( nn.Module ):
    """GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
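# Illustrative sketch (not part of the original file): AdaLayerNorm and
# AdaGroupNorm above both apply FiLM-style modulation, i.e. normalize, then
# shift and scale with values predicted from an embedding:
# x_mod = norm(x) * (1 + scale) + shift. A minimal standalone version with
# hypothetical names, assuming only torch:
import torch
import torch.nn as nn

class _TinyAdaNorm(nn.Module):
    def __init__(self, dim: int, cond_dim: int):
        super().__init__()
        self.linear = nn.Linear(cond_dim, dim * 2)  # predicts shift and scale
        self.norm = nn.LayerNorm(dim, elementwise_affine=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        shift, scale = self.linear(cond).chunk(2, dim=-1)
        return self.norm(x) * (1 + scale) + shift

# _TinyAdaNorm(8, 4)(torch.randn(2, 3, 8), torch.randn(2, 1, 4))  # -> (2, 3, 8)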
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A_ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments ( TrainingArguments ):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear' , metadata={'help': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
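# Illustrative sketch (not part of the original file): dataclasses like the
# one above are normally consumed through HfArgumentParser, which turns each
# field into a CLI flag. A hedged example of that wiring (flag names assume
# the field names defined above):
# from transformers import HfArgumentParser
# parser = HfArgumentParser(Seq2SeqTrainingArguments)
# (training_args,) = parser.parse_args_into_dataclasses(
#     args=['--output_dir', 'out', '--sortish_sampler', '--label_smoothing', '0.1']
# )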
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> List[str]:
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer( PreTrainedTokenizer ):
    """
    Constructs a Speech2Text2Tokenizer, which decodes (and, when a merges file
    is provided, encodes) with a BPE vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token='<s>',
        pad_token='<pad>',
        eos_token='</s>',
        unk_token='<unk>',
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8' ) as merges_handle:
                merges = merges_handle.read().split('\n' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges ) ) ) )
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder )

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder )

    def bpe(self, token):
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES, '' )
        word = word.replace(' ', BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.' )
        if self.do_lower_case:
            text = text.lower()
        split_tokens = []
        for token in text.split():
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token )
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ' '.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB ) )
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file, 'w', encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + '\n' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, 'w', encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
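# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the bpe() method above
# repeatedly merges the adjacent symbol pair with the best (lowest) rank until
# no known pair remains. A compact standalone trace of that loop, using toy
# merges instead of a real merges.txt:
def _toy_bpe(word: tuple, ranks: dict) -> tuple:
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        candidates = [pair for pair in pairs if pair in ranks]
        if not candidates:
            return word
        first, second = min(candidates, key=ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)

assert _toy_bpe(('l', 'o', 'w', 'e', 'r'), {('l', 'o'): 0, ('lo', 'w'): 1}) == ('low', 'e', 'r')
# ---------------------------------------------------------------------------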
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
    """Task template for image classification datasets."""

    task: str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'image': Image()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    image_column: str = 'image'
    label_column: str = 'labels'
    def align_with_features( self , features ) -> 'ImageClassification':
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool( PipelineTool ):
    """Tool that classifies an English text against a provided list of labels."""

    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['text', ['text']]
    outputs = ['text']
    def setup( self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def encode( self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode( self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
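# Illustrative sketch (not part of the original file): the tool above does
# zero-shot classification via NLI. Each candidate label becomes one
# "This example is <label>" hypothesis paired with the input text, and the
# label whose pair scores highest on the entailment logit wins:
_text = 'The new GPU doubles training throughput.'
_labels = ['sports', 'technology', 'politics']
_pairs = [(_text, f'This example is {label}') for label in _labels]
# -> three (premise, hypothesis) pairs fed to the MNLI model; an entailment
#    argmax over them would be expected to pick 'technology'.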
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
return datasets.DatasetInfo(
features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=_lowerCAmelCase , )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_lowerCAmelCase )
class NestedBeamDataset ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
return datasets.DatasetInfo(
features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=_lowerCAmelCase , )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
]
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_lowerCAmelCase )
def get_test_dummy_examples() -> Optional[Any]:
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
def get_test_nested_examples() -> Union[str, Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class BeamBuilderTest ( TestCase ):
'''simple docstring'''
@require_beam
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE_ = DummyBeamDataset(cache_dir=_lowerCAmelCase , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_lowerCAmelCase , builder.name , 'default' , '0.0.0' , F"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
SCREAMING_SNAKE_CASE_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _lowerCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _lowerCAmelCase )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_lowerCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def lowerCAmelCase_ ( self : List[str] ):
import apache_beam as beam
SCREAMING_SNAKE_CASE_ = beam.io.parquetio.WriteToParquet
SCREAMING_SNAKE_CASE_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE_ = DummyBeamDataset(cache_dir=_lowerCAmelCase , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
SCREAMING_SNAKE_CASE_ = partial(_lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_lowerCAmelCase , builder.name , 'default' , '0.0.0' , F"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        _lowerCAmelCase , builder.name , 'default' , '0.0.0' , F"{builder.name}-train-00001-of-00002.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
SCREAMING_SNAKE_CASE_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _lowerCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(_lowerCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def lowerCAmelCase_ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE_ = DummyBeamDataset(cache_dir=_lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE_ = NestedBeamDataset(cache_dir=_lowerCAmelCase , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_lowerCAmelCase , builder.name , 'default' , '0.0.0' , F"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
SCREAMING_SNAKE_CASE_ = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _lowerCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _lowerCAmelCase )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_lowerCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
        del dset
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def rename_key(key: str) -> str:
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key )
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.' ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('.' ) )
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
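# Illustrative sketch (not part of the original file): the transpose(2, 3, 1, 0)
# above converts conv kernels from PyTorch's (out_channels, in_channels, kH, kW)
# layout to Flax's (kH, kW, in_channels, out_channels):
import numpy as np

_pt_kernel = np.zeros((64, 3, 3, 3))             # (O, I, H, W)
_flax_kernel = _pt_kernel.transpose(2, 3, 1, 0)  # (H, W, I, O)
assert _flax_kernel.shape == (3, 3, 3, 64)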
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int] ) -> int:
    """
    Find the maximum sum over non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
    doctest.testmod()
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ):
if components is None:
A = []
A = list(UpperCamelCase__ )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
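# Illustrative sketch (not part of the original file): a worked example of the
# dot-product / angle formula used by the class above, computed by hand for
# v = (1, 2, 2) and w = (1, 0, 0):
import math

_num = 1.0 * 1.0 + 2.0 * 0.0 + 2.0 * 0.0      # v * w = 1
_den = math.sqrt(1 + 4 + 4) * math.sqrt(1)    # |v| * |w| = 3
assert round(math.degrees(math.acos(_num / _den)), 1) == 70.5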
def zero_vector(dimension : int ) -> Vector:
    assert isinstance(dimension, int )
    return Vector([0] * dimension )
def unit_basis_vector(dimension : int, pos : int ) -> Vector:
    assert isinstance(dimension, int ) and isinstance(pos, int )
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy(scalar : float, x : Vector, y : Vector ) -> Vector:
    assert (
        isinstance(x, Vector )
        and isinstance(y, Vector )
        and (isinstance(scalar, (int, float) ))
    )
    return x * scalar + y
def random_vector(n : int, a : int, b : int ) -> Vector:
    random.seed(None )
    ans = [random.randint(a, b ) for _ in range(n )]
    return Vector(ans )
class Matrix :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
A = matrix
A = w
A = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def square_zero_matrix(n : int ) -> Matrix:
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans, n, n )
def random_matrix(width : int, height : int, a : int, b : int ) -> Matrix:
    random.seed(None )
    matrix = [
        [random.randint(a, b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix, width, height )
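# Illustrative sketch (not part of the original file): the determinant method
# above expands along the first row with cofactors,
# det(A) = sum_y A[0][y] * (-1) ** y * det(minor(0, y)). Checked by hand on a
# singular 3x3 matrix:
#   | 1 2 3 |
#   | 4 5 6 |   det = 1*(5*9 - 6*8) - 2*(4*9 - 6*7) + 3*(4*8 - 5*7) = 0
#   | 7 8 9 |
assert 1 * (5 * 9 - 6 * 8) - 2 * (4 * 9 - 6 * 7) + 3 * (4 * 8 - 5 * 7) == 0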
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
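# Illustrative sketch (not part of the original snippet): for reproducible
# samples, diffusers pipelines also accept a seeded generator (hedged usage,
# reusing the same pipeline object as above):
# generator = torch.Generator("cuda").manual_seed(0)
# image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5,
#              generator=generator).images[0]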
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function='gelu' , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class BlenderbotSmallOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
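# Illustrative sketch (not part of the original file): in the causal-lm branch
# above, each past_key_values entry is a (key, value) pair of shape
# (batch, num_heads, past_seq_len, head_dim). Materializing one layer with
# throwaway sizes:
import torch

_batch, _heads, _past_len, _head_dim = 2, 16, 7, 512 // 16
_layer_past = (
    torch.zeros(_batch, _heads, _past_len, _head_dim),
    torch.zeros(_batch, _heads, _past_len, _head_dim),
)
assert _layer_past[0].shape == (_batch, _heads, _past_len, _head_dim)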
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stand-in so the module still imports when PIL is unavailable."""

        @staticmethod
        def open(*args , **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
A_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCamelCase = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = vqa_pipeline(lowerCamelCase_ , top_k=1)
self.assertEqual(
lowerCamelCase_ , [
[{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}],
[{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}],
] , )
@require_torch
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCamelCase = '''How many cats are there?'''
UpperCamelCase = vqa_pipeline(image=lowerCamelCase_ , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
lowerCamelCase_ , [{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}, {'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}])
UpperCamelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
lowerCamelCase_ , [{'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}, {'''score''': ANY(lowerCamelCase_), '''answer''': ANY(lowerCamelCase_)}])
@slow
@require_torch
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCamelCase = '''How many cats are there?'''
UpperCamelCase = vqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}])
UpperCamelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}])
UpperCamelCase = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4) , [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def UpperCAmelCase__ ( self) -> Optional[int]:
        pass
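# Illustrative sketch (not part of the original file): outside the test
# harness, the same pipeline is used directly (model name taken from the slow
# test above):
# from transformers import pipeline
# vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
# vqa(image='./tests/fixtures/tests_samples/COCO/000000039769.png',
#     question='How many cats are there?', top_k=2)
# -> [{'score': ..., 'answer': '2'}, {'score': ..., 'answer': '1'}]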
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor( ProcessorMixin ):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into one processor."""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
        encoding = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
        encoding.update(encoding_image_processor )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
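# Illustrative sketch (not part of the original file): a processor like the
# one above bundles the tokenizer and the image processor so text and images
# are prepared in a single call. Hedged usage (the checkpoint name is an
# assumption):
# from transformers import BridgeTowerProcessor
# from PIL import Image
# processor = BridgeTowerProcessor.from_pretrained('BridgeTower/bridgetower-base')
# inputs = processor(images=Image.open('cat.png'), text='a photo of a cat',
#                    return_tensors='pt')
# list(inputs.keys())  # input_ids, attention_mask, pixel_values, pixel_mask, ...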
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
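# Illustrative sketch (not part of the original file): _LazyModule defers the
# heavy imports until an attribute is first accessed. The same idea can be
# expressed with a plain PEP 562 module-level __getattr__ (hypothetical
# minimal version):
# def __getattr__(name):
#     if name == 'BioGptModel':
#         from .modeling_biogpt import BioGptModel
#         return BioGptModel
#     raise AttributeError(name)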
def multiplication_table(number : int, number_of_terms : int ) -> str:
    """Return the multiplication table of ``number`` up to ``number_of_terms`` terms."""
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings reachable from the given (days, absent, late) state."""
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    """Project Euler 191: count valid attendance strings over ``days`` days."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
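    # Illustrative check (not part of the original file): the Project Euler 191
    # statement says exactly 43 of the 81 possible 4-day strings win a prize.
    assert solution(4) == 43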
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
    def __init__( self , input_dims : int = 128 , targets_length : int = 256 , max_decoder_noise_time : float = 2_000.0 , d_model : int = 768 , num_layers : int = 12 , num_heads : int = 12 , d_kv : int = 64 , d_ff : int = 2048 , dropout_rate : float = 0.1 , ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ):
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        spec_out = self.decoder_norm(y )
        spec_out = self.post_dropout(spec_out )
        spec_out = self.spec_out(spec_out )
return spec_out
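# Illustrative sketch (not part of the original file): encoder_decoder_mask
# above is the outer product of the query and key padding masks,
# mask[b, q, k] = query_mask[b, q] * key_mask[b, k], e.g.:
# >>> q = torch.tensor([[1.0, 1.0, 0.0]])   # (batch=1, q_len=3)
# >>> k = torch.tensor([[1.0, 0.0]])        # (batch=1, k_len=2)
# >>> torch.mul(q.unsqueeze(-1), k.unsqueeze(-2)).tolist()
# [[[1.0, 0.0], [1.0, 0.0], [0.0, 0.0]]]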
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
            A = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
        A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )  # wi_0: gate input projection, d_model -> d_ff
        A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )  # wi_1: linear input projection, d_model -> d_ff
        A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )  # wo: output projection, d_ff -> d_model
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
        A = self.act(self.wi_0(UpperCamelCase__ ) )  # gate path through the first input projection
        A = self.wi_1(UpperCamelCase__ )  # linear path; a distinct projection, not the gate layer again
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
A = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
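    # In formula terms, the forward above computes T5-style RMSNorm:
    #   rms_norm(x) = weight * x / sqrt(mean(x ** 2, dim=-1) + eps)
    # i.e. LayerNorm without mean subtraction and without bias, with the
    # variance accumulated in fp32 and the result cast back to the weight dtype.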
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
| 699 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple=13 , lowerCamelCase__ : List[Any]=7 , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : Optional[Any]=99 , lowerCamelCase__ : Tuple=32 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : int=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : Optional[int]=512 , lowerCamelCase__ : str=16 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : int=0.02 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1_000 , ):
a__ : Union[str, Any] = parent
a__ : Optional[Any] = batch_size
a__ : Optional[int] = seq_length
a__ : Any = is_training
a__ : Tuple = use_input_mask
a__ : str = use_token_type_ids
a__ : int = use_labels
a__ : int = vocab_size
a__ : int = hidden_size
a__ : List[str] = num_hidden_layers
a__ : Tuple = num_attention_heads
a__ : Any = intermediate_size
a__ : Tuple = hidden_act
a__ : List[Any] = hidden_dropout_prob
a__ : List[Any] = attention_probs_dropout_prob
a__ : Dict = max_position_embeddings
a__ : int = type_vocab_size
a__ : str = type_sequence_label_size
a__ : List[str] = initializer_range
a__ : List[str] = num_labels
a__ : Any = num_choices
a__ : Union[str, Any] = scope
a__ : Dict = range_bbox
def _UpperCamelCase( self : Optional[Any] ):
a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
a__ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__ : int = bbox[i, j, 3]
a__ : str = bbox[i, j, 1]
a__ : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a__ : Union[str, Any] = bbox[i, j, 2]
a__ : int = bbox[i, j, 0]
a__ : Dict = t
a__ : List[str] = tf.convert_to_tensor(lowerCamelCase__ )
a__ : Union[str, Any] = None
if self.use_input_mask:
a__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
a__ : List[Any] = None
if self.use_token_type_ids:
a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ : str = None
a__ : str = None
a__ : int = None
if self.use_labels:
a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ : str = ids_tensor([self.batch_size] , self.num_choices )
a__ : Tuple = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ):
a__ : Any = TFLayoutLMModel(config=lowerCamelCase__ )
a__ : Any = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
a__ : List[Any] = model(lowerCamelCase__ , lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
a__ : Tuple = model(lowerCamelCase__ , lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : str ):
a__ : int = TFLayoutLMForMaskedLM(config=lowerCamelCase__ )
a__ : Union[str, Any] = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple ):
a__ : Any = self.num_labels
a__ : Optional[Any] = TFLayoutLMForSequenceClassification(config=lowerCamelCase__ )
a__ : int = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Dict ):
a__ : Any = self.num_labels
a__ : Any = TFLayoutLMForTokenClassification(config=lowerCamelCase__ )
a__ : int = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] ):
a__ : Optional[int] = TFLayoutLMForQuestionAnswering(config=lowerCamelCase__ )
a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase( self : Dict ):
a__ : Union[str, Any] = self.prepare_config_and_inputs()
        (a__, a__, a__, a__, a__, a__, a__, a__) = config_and_inputs  # config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
a__ : Optional[int] = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_lowercase = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase = False
_lowercase = True
_lowercase = 1_0
def _UpperCamelCase( self : List[str] ):
a__ : Optional[int] = TFLayoutLMModelTester(self )
a__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def _UpperCamelCase( self : List[str] ):
self.config_tester.run_common_tests()
def _UpperCamelCase( self : int ):
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _UpperCamelCase( self : Any ):
a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : str ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
def _UpperCamelCase( self : int ):
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Tuple ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : str = TFLayoutLMModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def _UpperCamelCase( self : Optional[int] ):
pass
def UpperCamelCase_ ( ) -> List[Any]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
a__ : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
a__ : Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
a__ : Tuple = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
a__ : Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
a__ : List[str] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase( self : int ):
a__ : Union[str, Any] = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        a__, a__, a__, a__, a__ = prepare_layoutlm_batch_inputs()  # input_ids, attention_mask, bbox, token_type_ids, labels
# forward pass
a__ : Optional[int] = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
# test the sequence output on [0, :3, :3]
a__ : Dict = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-3 ) )
# test the pooled output on [1, :3]
a__ : Tuple = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowerCamelCase__ , atol=1E-3 ) )
@slow
def _UpperCamelCase( self : Optional[int] ):
# initialize model with randomly initialized sequence classification head
a__ : Tuple = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
        a__, a__, a__, a__, a__ = prepare_layoutlm_batch_inputs()  # input_ids, attention_mask, bbox, token_type_ids, labels
# forward pass
a__ : Optional[Any] = model(
input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
a__ : Union[str, Any] = outputs.loss
a__ : Tuple = (2,)
self.assertEqual(loss.shape , lowerCamelCase__ )
# test the shape of the logits
a__ : Dict = outputs.logits
a__ : Any = (2, 2)
self.assertEqual(logits.shape , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : List[str] ):
# initialize model with randomly initialized token classification head
a__ : str = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
        a__, a__, a__, a__, a__ = prepare_layoutlm_batch_inputs()  # input_ids, attention_mask, bbox, token_type_ids, labels
# forward pass
a__ : Optional[Any] = model(
input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
# test the shape of the logits
a__ : Any = outputs.logits
a__ : List[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
# initialize model with randomly initialized token classification head
a__ : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        a__, a__, a__, a__, a__ = prepare_layoutlm_batch_inputs()  # input_ids, attention_mask, bbox, token_type_ids, labels
# forward pass
a__ : List[str] = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
# test the shape of the logits
a__ : Dict = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , lowerCamelCase__ )
self.assertEqual(outputs.end_logits.shape , lowerCamelCase__ )
| 37 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
        # the DWT-DCT watermark cannot be embedded in images narrower than 256 px,
        # so such images are returned unchanged
        if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = [self.encoder.encode(UpperCamelCase__ , 'dwtDct' ) for image in images]
A = torch.from_numpy(np.array(UpperCamelCase__ ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
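# Usage sketch (illustrative; assumes the `invisible-watermark` package is
# installed and images are float tensors in [-1, 1], NCHW, width >= 256):
#
#   watermarker = _UpperCAmelCase()
#   images = torch.rand(2, 3, 512, 512) * 2 - 1
#   watermarked = watermarker.UpperCamelCase(images)  # same shape and range, bits embedded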
| 699 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : List[str] = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''roc_bert'''
def __init__( self , __SCREAMING_SNAKE_CASE=3_0_5_2_2 , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=3_0_7_2 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=9_1_0 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=2_4_8_5_8 , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : List[str] = vocab_size
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : Optional[int] = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : str = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Optional[int] = initializer_range
snake_case__ : Any = type_vocab_size
snake_case__ : Dict = layer_norm_eps
snake_case__ : Optional[Any] = use_cache
snake_case__ : List[Any] = enable_pronunciation
snake_case__ : Tuple = enable_shape
snake_case__ : List[str] = pronunciation_embed_dim
snake_case__ : List[str] = pronunciation_vocab_size
snake_case__ : int = shape_embed_dim
snake_case__ : Dict = shape_vocab_size
snake_case__ : int = concat_input
snake_case__ : Optional[int] = position_embedding_type
snake_case__ : int = classifier_dropout
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 38 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
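# Example invocation (hypothetical script name and paths, for illustration):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json \
#       --not_finetuned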
| 699 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = CpmAntTokenizer
SCREAMING_SNAKE_CASE : Optional[Any] = False
def snake_case__( self : Tuple ) ->Any:
super().setUp()
snake_case_ = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def snake_case__( self : Any ) ->str:
snake_case_ = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
snake_case_ = '''今天天气真好!'''
snake_case_ = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = '''今天天气真好!'''
snake_case_ = [tokenizer.bos_token] + tokens
snake_case_ = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
snake_case_ = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase ) | 39 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : T ):
A = data
A = None
def __str__( self : Optional[int] ):
return f'''{self.data}'''
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
A = None
def __iter__( self : int ):
A = self.top
while node:
yield node.data
A = node.next
def __str__( self : Any ):
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
def UpperCamelCase ( self : List[str] ):
return self.top is None
def UpperCamelCase ( self : Dict , UpperCamelCase__ : T ):
A = Node(UpperCamelCase__ )
if not self.is_empty():
A = self.top
A = node
def UpperCamelCase ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
        assert self.top is not None  # non-emptiness was just checked; narrows the Optional for type checkers
A = self.top
A = self.top.next
return pop_node.data
def UpperCamelCase ( self : List[str] ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCamelCase ( self : List[str] ):
A = None
if __name__ == "__main__":
from doctest import testmod
testmod()
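# Intended behaviour of the stack above (the five methods rendered as
# `UpperCamelCase` correspond, in source order, to is_empty, push, pop,
# peek and clear):
#
#   push(1); push(2); push(3)    # top is now 3
#   str(stack) == "3->2->1"      # __str__ walks top-down via __iter__
#   pop() == 3                   # LIFO order; raises IndexError when empty
#   peek() == 2                  # inspects the top without removing it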
| 699 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
from __future__ import annotations
import math
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : bool, lowerCAmelCase : list[int], lowerCAmelCase : float ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
if is_max
else min(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
)
def __UpperCamelCase () -> None:
A = [90, 23, 6, 33, 21, 65, 123, 34_423]
    A = int(math.log2(len(lowerCAmelCase )))  # tree height; assumes len(scores) is a power of two
print(f'''Optimal value : {minimax(0, 0, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )}''' )
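    # Worked example for the scores used above (8 leaves, height 3; levels
    # alternate max, min, max from the root):
    #   leaves:        90 23 | 6 33 | 21 65 | 123 34423
    #   depth 2 (max):   90  |  33  |   65  |   34423
    #   depth 1 (min):      33      |        65
    #   depth 0 (max):            65   -> "Optimal value : 65"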
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 699 | 0 |
'''simple docstring'''
from typing import Any
import numpy as np
def _A ( A__ ):
"""simple docstring"""
return np.array_equal(A__ , matrix.conjugate().T )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = v.conjugate().T
__lowercase = v_star.dot(A__ )
assert isinstance(A__ , np.ndarray )
return (v_star_dot.dot(A__ )) / (v_star.dot(A__ ))
def _A ( ):
"""simple docstring"""
__lowercase = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase = np.array([[1], [2], [3]] )
assert is_hermitian(A__ ), F"{a} is not hermitian."
print(rayleigh_quotient(A__ , A__ ) )
__lowercase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A__ ), F"{a} is not hermitian."
assert rayleigh_quotient(A__ , A__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
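# Background: for a Hermitian matrix M and a nonzero vector v, the Rayleigh
# quotient
#     R(M, v) = (v* M v) / (v* v)
# is always real and bounded by the extreme eigenvalues,
#     lambda_min(M) <= R(M, v) <= lambda_max(M),
# with equality exactly when v is a corresponding eigenvector.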
| 41 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int, lowerCAmelCase : Optional[int] = None, ) -> Dict:
A = {}
if train_file is not None:
A = [train_file]
if eval_file is not None:
A = [eval_file]
if test_file is not None:
A = [test_file]
A = datasets.load_dataset('csv', data_files=lowerCAmelCase )
A = list(ds[list(files.keys() )[0]].features.keys() )
A = features_name.pop(lowerCAmelCase )
A = list(set(ds[list(files.keys() )[0]][label_name] ) )
A = {label: i for i, label in enumerate(lowerCAmelCase )}
A = tokenizer.model_input_names
A = {}
if len(lowerCAmelCase ) == 1:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length' ), batched=lowerCAmelCase, )
elif len(lowerCAmelCase ) == 2:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length', ), batched=lowerCAmelCase, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = field(metadata={'''help''': '''Which column contains the label'''} )
SCREAMING_SNAKE_CASE : str = field(default=__lowercase , metadata={'''help''': '''The path of the training file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the development file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the test file'''} )
SCREAMING_SNAKE_CASE : int = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
A , A , A , A = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=lowerCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(lowerCAmelCase ), labelaid=lowerCAmelCase, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=lowerCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(lowerCAmelCase : EvalPrediction ) -> Dict:
A = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A = TFTrainer(
model=lowerCAmelCase, args=lowerCAmelCase, train_dataset=lowerCAmelCase, eval_dataset=lowerCAmelCase, compute_metrics=lowerCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(lowerCAmelCase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(lowerCAmelCase )
return results
if __name__ == "__main__":
main()
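    # Example invocation (illustrative file names; the flag names mirror the
    # dataclass fields used above -- train_file, dev_file, test_file,
    # label_column_id, max_seq_length, model_name_or_path -- plus standard
    # TFTrainingArguments such as --output_dir, --do_train and --do_eval):
    #
    #   python run_tf_text_classification.py \
    #       --train_file train.csv --dev_file dev.csv --test_file test.csv \
    #       --label_column_id 0 --model_name_or_path bert-base-uncased \
    #       --output_dir ./output --max_seq_length 128 --do_train --do_eval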
| 699 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _UpperCamelCase ( __UpperCamelCase ) -> None:
create_state_space_tree(__UpperCamelCase ,[] ,0 )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> None:
if index == len(__UpperCamelCase ):
print(__UpperCamelCase )
return
create_state_space_tree(__UpperCamelCase ,__UpperCamelCase ,index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(__UpperCamelCase ,__UpperCamelCase ,index + 1 )
current_subsequence.pop()
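# Trace for a two-element sequence [x, y]: each index is first excluded
# (recurse without appending) and then included (append, recurse, pop), so
# the subsequences print in the order: [] , [y] , [x] , [x, y].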
if __name__ == "__main__":
A_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 42 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def constraint_to_multiple_of(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=None ):
lowercase__ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowercase__ = math.floor(val / multiple ) * multiple
if x < min_val:
lowercase__ = math.ceil(val / multiple ) * multiple
return x
lowercase__ = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else output_size
lowercase__ , lowercase__ = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = output_size
# determine new height and width
lowercase__ = output_height / input_height
lowercase__ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowercase__ = scale_width
else:
# fit height
lowercase__ = scale_height
lowercase__ = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE )
lowercase__ = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE )
return (new_height, new_width)
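    # Worked example: input 480x640 (h x w), output_size 384, keep_aspect_ratio
    # True, multiple 32. scale_height = 0.8 and scale_width = 0.6; |1 - 0.8| is
    # smaller, so the height scale is fitted and applied to both dimensions:
    # new_height = 0.8 * 480 -> 384, new_width = 0.8 * 640 -> 512, both already
    # multiples of 32, giving (384, 512).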
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = ['''pixel_values''']
def __init__( self: Optional[Any] , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: bool = False , UpperCamelCase_: int = 1 , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 255 , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , **UpperCamelCase_: Tuple , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = size if size is not None else {'''height''': 384, '''width''': 384}
lowercase__ = get_size_dict(UpperCamelCase_ )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = keep_aspect_ratio
lowercase__ = ensure_multiple_of
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self: Any , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: bool = False , UpperCamelCase_: int = 1 , UpperCamelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: int , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase__ = get_resize_output_image_size(
UpperCamelCase_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase_ , multiple=UpperCamelCase_ , )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[int, float] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Any , ) -> List[Any]:
"""simple docstring"""
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: ImageInput , UpperCamelCase_: bool = None , UpperCamelCase_: int = None , UpperCamelCase_: bool = None , UpperCamelCase_: int = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: float = None , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_: List[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(UpperCamelCase_ )
lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
lowercase__ = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
lowercase__ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-image semantic segmentation maps, optionally resized to `target_sizes`."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
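# --- Added usage sketch, not part of the original file ---
# A hypothetical end-to-end example, assuming this class is exposed as the DPT
# image processor; checkpoint and class names below are illustrative only:
#
#   from PIL import Image
#   from transformers import DPTImageProcessor, DPTForSemanticSegmentation
#
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
#   model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")
#   image = Image.open("scene.jpg")
#   inputs = processor(images=image, return_tensors="pt")       # preprocess()
#   outputs = model(**inputs)
#   seg_maps = processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]])               # one (h, w) map per image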
| 43 |
def actual_power(a: int, b: int) -> int:
    """Recursive exponentiation by squaring; for negative b this returns a**abs(b), since int(b / 2) truncates toward zero."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))

def power(a: int, b: int) -> float:
    """Compute a**b, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
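    # Added illustrative checks (not in the original): exponentiation by
    # squaring should agree with Python's built-in ** operator.
    assert power(2, 10) == 2**10
    assert power(-2, -3) == (-2) ** -3  # -0.125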
| 699 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels: int, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Compute per-class intersection and union areas for one prediction / ground-truth pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels: int, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Accumulate intersection and union areas over a batch of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels: int, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Compute mean IoU, mean accuracy and overall accuracy over a batch of segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ])

    def _compute(self, predictions, references, num_labels: int, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
        iou_result = mean_iou(results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
        return iou_result
| 44 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
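# Added illustrative checks (not in the original): a negative number swaps the
# running minimum and maximum products.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0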
| 699 | 0 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to a string in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
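# Added worked examples (not in the original):
#   decimal_to_any(255, 16) -> "FF"
#   decimal_to_any(9, 2)    -> "1001"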
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
        )
| 45 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16, in place or at `save_path`."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
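# Added note: typical CLI usage via fire (the script name is illustrative):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin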
if __name__ == "__main__":
    fire.Fire(convert)
| 46 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a BioGPT model."""

    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
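# Added usage sketch (not part of the original file):
#   from transformers import BioGptConfig, BioGptModel
#   configuration = BioGptConfig()        # default microsoft/biogpt hyper-parameters
#   model = BioGptModel(configuration)    # randomly initialised model with that config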
| 699 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if at least one of `attributes` is used in the modeling source files or is explicitly allowed."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class.__init__` parameters never used in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    """Check every configuration class in `CONFIG_MAPPING` and raise if any attribute is unused."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
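# Added note: run from the repository root as `python utils/check_config_attributes.py`;
# it raises a ValueError listing every config attribute that no modeling file reads.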
| 47 |
import sys
def matrix_chain_order(array):
    """Dynamic-programming solution: return the cost matrix and split points for an optimal parenthesization."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
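# Added note: the recurrence computed above is
#   matrix[a][b] = min over c in [a, b) of
#       matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# i.e. the cheapest split point for multiplying the chain A_a ... A_b.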
def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization of matrices A_i ... A_j."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")

def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 699 | 0 |
'''simple docstring'''
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
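# Added note: if n = p1**a1 * ... * pk**ak, then n has
# (a1 + 1) * ... * (ak + 1) divisors, which is exactly what the loop above
# accumulates (the final *= 2 accounts for a leftover prime factor).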
def solution() -> int:
    """Return the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 48 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))

def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
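# Added note: prime_candidate enumerates differences of consecutive cubes,
# (n + 1)**3 - n**3 == 3*n*n + 3*n + 1  (7, 19, 37, 61, ...),
# since successive differences grow by exactly 6 each step.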
if __name__ == "__main__":
print(F'''{solution() = }''')
| 699 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of a tensor, mixing static Python ints and dynamic tf.shape entries."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
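# Added note: for a tensor whose static shape is fully known, shape_list returns
# plain Python ints (e.g. [2, 3]); any dimension unknown at trace time is
# returned as the matching element of tf.shape(tensor) instead.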
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Same output as tf.nn.softmax; the tiny additive shift keeps XLA compilation working on CPU."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon)
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ))
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64_512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 49 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    """Deprecated alias kept for backward compatibility; use ImageGPTImageProcessor instead."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 699 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the tests below."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 50 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """A basic Transformer block: self-attention, optional cross-attention and a feed-forward layer."""

    def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.")
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention)
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention)  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
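    # Added note: chunked feed-forward trades peak memory for extra kernel
    # launches by applying self.ff to `chunk_size`-sized slices along
    # self._chunk_dim and concatenating the results; the output is unchanged
    # because the feed-forward acts on each position independently.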
    def forward(self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs)
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs)
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.")
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim)
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    """A feed-forward layer with a configurable activation (GELU, GEGLU, ...)."""

    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """GELU activation preceded by a linear projection, with optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """Gated GELU: the projection output is split in two and one half gates the other."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
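# Added note: GEGLU follows "GLU Variants Improve Transformer" (Shazeer, 2020):
# one linear layer produces 2 * dim_out features, split into a value half and a
# gate half, and the value is multiplied by GELU(gate).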
class ApproximateGELU(nn.Module):
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """Layer norm whose scale and shift are predicted from a timestep embedding."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """adaLN-Zero: predicts shift/scale/gate chunks for attention and MLP from timestep and class embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """Group norm whose scale and shift are predicted from a conditioning embedding."""

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 699 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 51 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
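# Added worked example (not in the original): for word ("l", "o", "w"),
# get_pairs returns {("l", "o"), ("o", "w")}; the BPE loop below repeatedly
# merges the highest-ranked adjacent pair until no ranked pair remains.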
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer (BPE-based, decoder-side)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs)
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '\n' )
A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(UpperCamelCase__ ) + '\n' )
index += 1
return (vocab_file, merges_file)
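
# Illustrative sketch of the merge loop in `bpe` above, kept self-contained:
# the local `get_pairs` helper and the toy merge table are simplified
# stand-ins (assumptions for the example), not the tokenizer's real vocabulary.
def _bpe_merge_demo():
    def get_pairs(word):
        # Set of adjacent symbol pairs in a word given as a tuple of symbols.
        return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

    bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical merge table
    word = ("l", "o", "w")
    while len(word) > 1:
        bigram = min(get_pairs(word), key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word  # ('low',)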
"""simple docstring"""
def __A ( a_ :int , a_ :int) -> int:
return int((input_a, input_a).count(0) == 0)
def __A ( ) -> None:
assert and_gate(0 , 0) == 0
assert and_gate(0 , 1) == 0
assert and_gate(1 , 0) == 0
assert and_gate(1 , 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 52 |
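
# Other gates compose with this primitive; a hedged sketch (the not_gate
# helper below is an assumption added for illustration):
def not_gate(input_1: int) -> int:
    return int(input_1 == 0)


def nand_gate(input_1: int, input_2: int) -> int:
    # NAND negates AND; on its own it is functionally complete.
    return not_gate(and_gate(input_1, input_2))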
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
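

if __name__ == "__main__":
    # Usage sketch: instantiating the tool downloads the facebook/bart-large-mnli
    # checkpoint; the sample text and candidate labels below are made up for
    # illustration.
    classifier = TextClassificationTool()
    print(classifier("This movie was a masterpiece.", labels=["positive", "negative"]))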
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def a_ ( ):
__lowerCAmelCase = 10
__lowerCAmelCase = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__lowerCAmelCase = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCAmelCase_ ) ),
}, features=lowerCAmelCase_, )
return dataset
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : int ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
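    # map() with no mapping function but an explicit cache_file_name simply
    # materializes the dataset to that Arrow file.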
dataset.map(cache_file_name=lowerCAmelCase_ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt'
__lowerCAmelCase = FILE_CONTENT
with open(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_ )
return filename
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Optional[Any] ):
    import bz2
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__lowerCAmelCase = bytes(lowerCAmelCase_, 'utf-8' )
    with bz2.open(lowerCAmelCase_, 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[Any] ):
import gzip
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__lowerCAmelCase = bytes(lowerCAmelCase_, 'utf-8' )
with gzip.open(lowerCAmelCase_, 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__lowerCAmelCase = bytes(lowerCAmelCase_, 'utf-8' )
        with lz4.frame.open(lowerCAmelCase_, 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : List[str] ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(lowerCAmelCase_, 'w' ) as archive:
archive.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Optional[Any] ):
import tarfile
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCAmelCase_, 'w' ) as f:
f.add(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Any ):
import lzma
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__lowerCAmelCase = bytes(lowerCAmelCase_, 'utf-8' )
with lzma.open(lowerCAmelCase_, 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : int ):
import zipfile
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__lowerCAmelCase = bytes(lowerCAmelCase_, 'utf-8' )
with zstd.open(lowerCAmelCase_, 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.xml'
__lowerCAmelCase = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_ )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int ):
__lowerCAmelCase = datasets.Dataset.from_dict(lowerCAmelCase_ )
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Dict ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowerCAmelCase_ ) ) as con:
__lowerCAmelCase = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Dict ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCAmelCase_, 'w', newline='' ) as f:
__lowerCAmelCase = csv.DictWriter(lowerCAmelCase_, fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCAmelCase_, 'w', newline='' ) as f:
__lowerCAmelCase = csv.DictWriter(lowerCAmelCase_, fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : List[Any] ):
    import bz2
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCAmelCase_, 'rb' ) as f:
__lowerCAmelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCAmelCase_, 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : int ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : int, lowerCAmelCase_ : Any ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.basename(csv_path.replace('.csv', '.CSV' ) ) )
f.write(lowerCAmelCase_, arcname=os.path.basename(csva_path.replace('.csv', '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : List[str], lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.join('main_dir', os.path.basename(lowerCAmelCase_ ) ) )
f.write(lowerCAmelCase_, arcname=os.path.join('main_dir', os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__lowerCAmelCase = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowerCAmelCase_, 'wb' ) as f:
__lowerCAmelCase = pq.ParquetWriter(lowerCAmelCase_, schema=lowerCAmelCase_ )
__lowerCAmelCase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCAmelCase_ ) )] for k in DATA[0]}, schema=lowerCAmelCase_ )
writer.write_table(lowerCAmelCase_ )
writer.close()
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__lowerCAmelCase = {'data': DATA}
with open(lowerCAmelCase_, 'w' ) as f:
json.dump(lowerCAmelCase_, lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__lowerCAmelCase = {'data': DATA_DICT_OF_LISTS}
with open(lowerCAmelCase_, 'w' ) as f:
json.dump(lowerCAmelCase_, lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCAmelCase_, 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : int ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCAmelCase_, 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCAmelCase_, 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Any ):
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCAmelCase_, 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCAmelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Optional[Any] ):
import gzip
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCAmelCase_, 'rb' ) as orig_file:
with gzip.open(lowerCAmelCase_, 'wb' ) as zipped_file:
zipped_file.writelines(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Tuple ):
import gzip
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCAmelCase_, 'rb' ) as orig_file:
with gzip.open(lowerCAmelCase_, 'wb' ) as zipped_file:
zipped_file.writelines(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict, lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Any, lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.join('nested', os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.join('main_dir', os.path.basename(lowerCAmelCase_ ) ) )
f.write(lowerCAmelCase_, arcname=os.path.join('main_dir', os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCAmelCase_, 'w' ) as f:
f.add(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
f.add(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Dict, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCAmelCase_, 'w' ) as f:
f.add(lowerCAmelCase_, arcname=os.path.join('nested', os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = ['0', '1', '2', '3']
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCAmelCase_, 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = ['0', '1', '2', '3']
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCAmelCase_, 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = ['0', '1', '2', '3']
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCAmelCase_, 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Any, lowerCAmelCase_ : Any ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.join('main_dir', os.path.basename(lowerCAmelCase_ ) ) )
f.write(lowerCAmelCase_, arcname=os.path.join('main_dir', os.path.basename(lowerCAmelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : List[str], lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCAmelCase_, arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__lowerCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( ):
return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def a_ ( ):
return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCAmelCase_, 'w' ) as f:
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ) )
f.write(lowerCAmelCase_, arcname=os.path.basename(lowerCAmelCase_ ).replace('.jpg', '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCAmelCase_ : Dict ):
__lowerCAmelCase = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt', 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt', 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt', 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt', 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt', 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
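

# Hedged sketch of how such session-scoped fixtures are consumed; the fixture
# and test names below are hypothetical (the fixtures above would each need a
# distinct function name for pytest to resolve them by name):
@pytest.fixture(scope='session')
def example_text_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'example.txt'
    path.write_text('hello\nworld\n')
    return str(path)


def test_example_text_path(example_text_path):
    with open(example_text_path) as f:
        assert f.read().splitlines() == ['hello', 'world']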
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
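

if __name__ == "__main__":
    # Why the 4-D conv case transposes with (2, 3, 1, 0): PyTorch stores conv
    # kernels as (out_ch, in_ch, kH, kW) while Flax expects (kH, kW, in_ch, out_ch).
    # The shapes below are illustrative only.
    import numpy as np

    pt_kernel = np.zeros((8, 3, 5, 5))
    assert pt_kernel.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)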
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_lowerCAmelCase ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
UpperCAmelCase_ =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase_ =model(**_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ =False
UpperCAmelCase_ =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_lowerCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCAmelCase )
model.train()
UpperCAmelCase_ =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
UpperCAmelCase_ =model(**_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =_config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ =BeitModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values.to(_lowerCAmelCase )
# prepare bool_masked_pos
UpperCAmelCase_ =torch.ones((1, 196) , dtype=torch.bool ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(pixel_values=_lowerCAmelCase , bool_masked_pos=_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _lowerCAmelCase , atol=1e-2 ) )
@slow
def lowerCAmelCase__ ( self: Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 1000) )
self.assertEqual(logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
UpperCAmelCase_ =281
self.assertEqual(logits.argmax(-1 ).item() , _lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
UpperCAmelCase_ =2396
self.assertEqual(logits.argmax(-1 ).item() , _lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
UpperCAmelCase_ =BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase_ =model.to(_lowerCAmelCase )
UpperCAmelCase_ =BeitImageProcessor(do_resize=_lowerCAmelCase , size=640 , do_center_crop=_lowerCAmelCase )
UpperCAmelCase_ =load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase_ =Image.open(ds[0]["file"] )
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits
# verify the logits
UpperCAmelCase_ =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _lowerCAmelCase )
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
UpperCAmelCase_ =torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=_lowerCAmelCase , )
else:
UpperCAmelCase_ =torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase_ =model.to(_lowerCAmelCase )
UpperCAmelCase_ =BeitImageProcessor(do_resize=_lowerCAmelCase , size=640 , do_center_crop=_lowerCAmelCase )
UpperCAmelCase_ =load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase_ =Image.open(ds[0]["file"] )
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
UpperCAmelCase_ =outputs.logits.detach().cpu()
UpperCAmelCase_ =image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(500, 300)] )
UpperCAmelCase_ =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
UpperCAmelCase_ =image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase )
UpperCAmelCase_ =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
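

if __name__ == "__main__":
    # Hedged end-to-end sketch of the semantic-segmentation path exercised above.
    # It downloads the checkpoint and needs network access; the COCO image URL
    # is illustrative.
    import requests

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
    seg_model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")

    with torch.no_grad():
        seg_outputs = seg_model(**processor(images=image, return_tensors="pt"))

    # One (height, width) map of ADE20K class indices per input image.
    seg_map = processor.post_process_semantic_segmentation(seg_outputs, target_sizes=[image.size[::-1]])[0]
    print(seg_map.shape)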
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A vector of arbitrary real components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A matrix of fixed width and height."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prefactors = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prefactors)


def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
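

if __name__ == "__main__":
    # Quick usage sketch of the classes above.
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)  # (5,7,9)
    print(v * w)  # 32, the dot product
    print(round(v.euclidean_length(), 4))  # 3.7417
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # -2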
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None  # set by the concrete test class

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
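

# Hedged sketch of a concrete test class plugging into the mixin above; the
# Wav2Vec2 feature extractor and its kwargs are one plausible choice, not the
# only one.
import unittest

from transformers import Wav2Vec2FeatureExtractor


class Wav2Vec2FeatureExtractionSaveTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}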
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
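

# Quick sketch: the attribute_map above lets generic code read `hidden_size`
# and `num_attention_heads` even though the config stores `d_model` and
# `encoder_attention_heads` (the values here are illustrative):
#
#   config = BlenderbotSmallConfig(d_model=256, encoder_attention_heads=8)
#   assert config.hidden_size == 256 and config.num_attention_heads == 8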
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
            A = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
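# ---------------------------------------------------------------------------
# Hedged usage sketch: the config/OnnxConfig pair above mirrors transformers'
# BartConfig / BartOnnxConfig (the class names in this sample are anonymized).
# Assuming that mapping holds, ONNX dummy inputs could be generated like so:
#
#   from transformers import AutoTokenizer, BartConfig
#   from transformers.models.bart.configuration_bart import BartOnnxConfig
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#   onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer)
#   # dict with input_ids, attention_mask, decoder_input_ids, decoder_attention_mask
# ---------------------------------------------------------------------------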
| 699 | 0 |
'''simple docstring'''

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524_288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    '''SentencePiece-based tokenizer for Reformer models.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        eos_token='</s>',
        unk_token='<unk>',
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
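# Hedged usage sketch (needs the `sentencepiece` package and network access to
# fetch the checkpoint listed in PRETRAINED_VOCAB_FILES_MAP above):
#
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   pieces = tok.tokenize("Crime and Punishment")
#   text = tok.convert_tokens_to_string(pieces)  # round-trips back to the input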
| 56 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
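# Hedged usage sketch ("BridgeTower/bridgetower-base" is a public checkpoint and
# "cat.png" a placeholder path):
#
#   from PIL import Image
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of a cat",
#                      return_tensors="pt")  # input_ids + pixel_values (+ pixel_mask)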
| 699 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : int = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig( PretrainedConfig ):
"""simple docstring"""
    model_type ='''xlm'''
    attribute_map ={
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
    def __init__( self , vocab_size=3_0_1_4_5 , emb_dim=2_0_4_8 , n_layers=1_2 , n_heads=1_6 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=5_1_2 , embed_init_std=2_0_4_8**-0.5 , layer_norm_eps=1e-12 , init_std=0.0_2 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs['n_words']
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 57 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
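# For example, multiplication_table(number=3, number_of_terms=2) returns the
# two-line string "3 * 1 = 3\n3 * 2 = 6".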
| 699 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    '''simple docstring'''
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    '''simple docstring'''
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    '''simple docstring'''
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    '''simple docstring'''

    def strip_title(title: str):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    '''simple docstring'''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )

    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    '''simple docstring'''
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

        score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
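# Example invocation (the script filename is a placeholder; all flags come from the
# argparse definition above, and facebook/rag-sequence-nq is a public checkpoint):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set questions.txt \
#       --gold_data_path gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e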
| 58 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
A , A , A = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class TaDenseGatedActDense ( nn.Module ):
    '''simple docstring'''
    def __init__( self , d_model: int , d_ff: int , dropout_rate: float ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ):
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm ( nn.Module ):
    '''simple docstring'''
    def __init__( self , hidden_size , eps=1e-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation ( nn.Module ):
    '''simple docstring'''
    def forward( self , input: torch.Tensor ):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer ( nn.Module ):
    '''simple docstring'''
    def __init__( self , in_features , out_features ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
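# The FiLM conditioning above is just a per-feature affine transform,
# x * (1 + scale) + shift, with scale and shift projected from the conditioning
# embedding. A minimal self-contained check (illustrative shapes only):
def _film_demo():
    film = TaFiLMLayer(in_features=8, out_features=4)
    x = torch.randn(2, 10, 4)      # (batch, sequence, features)
    cond = torch.randn(2, 1, 8)    # conditioning broadcasts over the sequence axis
    return film(x, cond).shape     # torch.Size([2, 10, 4])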
| 699 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Dict =self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "tf_padding"))
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "depth_multiplier"))
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=32 , UpperCAmelCase_ : Optional[Any]=0.25 , UpperCAmelCase_ : Tuple=8 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=1_024 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Union[str, Any]="relu6" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : Tuple=None , ) ->Any:
'''simple docstring'''
lowerCamelCase__: Any =parent
lowerCamelCase__: List[str] =batch_size
lowerCamelCase__: List[str] =num_channels
lowerCamelCase__: Optional[Any] =image_size
lowerCamelCase__: str =depth_multiplier
lowerCamelCase__: Union[str, Any] =min_depth
lowerCamelCase__: Any =tf_padding
lowerCamelCase__: List[Any] =int(last_hidden_size * depth_multiplier)
lowerCamelCase__: Any =output_stride
lowerCamelCase__: Optional[int] =hidden_act
lowerCamelCase__: Union[str, Any] =classifier_dropout_prob
lowerCamelCase__: List[Any] =use_labels
lowerCamelCase__: int =is_training
lowerCamelCase__: Optional[int] =num_labels
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: Any =scope
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Tuple =None
lowerCamelCase__: Tuple =None
if self.use_labels:
lowerCamelCase__: str =ids_tensor([self.batch_size] , self.num_labels)
lowerCamelCase__: Dict =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
lowerCamelCase__: Union[str, Any] =self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =MobileNetVaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Optional[int] =model(UpperCAmelCase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.num_labels
lowerCamelCase__: Union[str, Any] =MobileNetVaForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Optional[Any] =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
'''simple docstring'''
lowerCamelCase__: int =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Dict =config_and_inputs
lowerCamelCase__: int ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =MobileNetVaModelTester(self)
lowerCamelCase__: Any =MobileNetVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not output attentions")
def SCREAMING_SNAKE_CASE_ (self : str) ->str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->int:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Tuple =model_class(UpperCAmelCase_)
lowerCamelCase__: int =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase__: Union[str, Any] =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple):
lowerCamelCase__: int =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: int =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Optional[Any] =outputs.hidden_states
lowerCamelCase__: List[str] =26
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: List[str] =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__: str =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[Any]:
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Any =MobileNetVaModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
lowerCamelCase__: Tuple =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Any =MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(UpperCAmelCase_)
lowerCamelCase__: List[str] =self.default_image_processor
lowerCamelCase__: str =prepare_img()
lowerCamelCase__: str =image_processor(images=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
lowerCamelCase__: List[str] =model(**UpperCAmelCase_)
# verify the logits
lowerCamelCase__: Tuple =torch.Size((1, 1_001))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: Tuple =torch.tensor([-4.1739, -1.1233, 3.1205]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4))
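# Hedged usage sketch outside the unittest harness (same public checkpoint and
# COCO fixture image as the integration test above):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/mobilenet_v1_1.0_224")
#   preds = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")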
| 59 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker :
    '''simple docstring'''
    def __init__( self ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )
    def apply_watermark( self , images: torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
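# Hedged usage sketch: inputs are expected in [-1, 1] with NCHW layout, and anything
# narrower than 256 px passes through unmarked (the class name above is inferred;
# requires the `invisible-watermark` package):
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.randn(1, 3, 512, 512).clamp(-1.0, 1.0)
#   marked = watermarker.apply_watermark(images)  # same shape, watermark embedded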
| 699 | 0 |
def solution(n: int = 1_000) -> int:
    """simple docstring"""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(F'''{solution() = }''')
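# A brute-force cross-check of the closed-form search in solution() above
# (O(n^2), still fast for n = 1000):
def _brute_force_solution(n: int = 1_000) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best


# _brute_force_solution(1_000) == solution(1_000) == 31875000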
| 60 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
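# Example invocation (script filename and paths are placeholders; the flags match
# the argparse definition above):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --config_path ./config.json \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned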
| 699 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger('transformers.models.encodec')
UpperCamelCase = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
UpperCamelCase = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}')
    # every supported weight_type ("weight", "weight_g", "bias", "running_mean",
    # the LSTM parameters, ...) names an attribute of the resolved module, so a
    # single getattr replaces the original long elif chain without changing behavior
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """simple docstring"""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(F'Unsupported model: {model_name}')
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'{name} was ignored')
            continue
        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                # the first matching substring decides the weight type; ordering
                # keeps e.g. "bias_ih_l0" from being shadowed by plain "bias"
                weight_type = None
                for candidate in (
                    "weight_g", "weight_v",
                    "weight_ih_l0", "weight_hh_l0", "bias_ih_l0", "bias_hh_l0",
                    "weight_ih_l1", "weight_hh_l1", "bias_ih_l1", "bias_hh_l1",
                    "bias", "weight", "running_mean", "running_var", "num_batches_tracked",
                ):
                    if candidate in name:
                        weight_type = candidate
                        break
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    """simple docstring"""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'Unknown model name: {model_name}')
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
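# Example invocation (editor's sketch, not part of the original script; the
# checkpoint file is one of the URLs listed at the top of this file):
#   python convert_encodec_checkpoint.py --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf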
| 61 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class Node(Generic[T]):
    '''simple docstring'''

    def __init__(self, data: T):
        self.data = data
        self.next = None

    def __str__(self) -> str:
        return f'''{self.data}'''


class LinkedStack(Generic[T]):
    '''simple docstring'''

    def __init__(self) -> None:
        self.top = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
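    # Editor's sketch (not in the original file): a quick smoke test of the
    # stack as reconstructed above.
    stack = LinkedStack()
    for item in (1, 2, 3):
        stack.push(item)
    assert str(stack) == "3->2->1" and len(stack) == 3
    assert stack.pop() == 3 and stack.peek() == 2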
| 699 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 62 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, is_max, scores, height), minimax(depth + 1, node_index * 2 + 1, is_max, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, is_max, scores, height), minimax(depth + 1, node_index * 2 + 1, is_max, scores, height), )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f'''Optimal value : {minimax(0, 0, True, scores, height)}''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 699 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    """simple docstring"""

    def test_gelu_python(self) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("""gelu""")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("""gelu""")
        gelu10 = get_activation("""gelu_10""")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self) -> None:
        get_activation("""gelu""")
        get_activation("""gelu_10""")
        get_activation("""gelu_fast""")
        get_activation("""gelu_new""")
        get_activation("""gelu_python""")
        get_activation("""gelu_pytorch_tanh""")
        get_activation("""linear""")
        get_activation("""mish""")
        get_activation("""quick_gelu""")
        get_activation("""relu""")
        get_activation("""sigmoid""")
        get_activation("""silu""")
        get_activation("""swish""")
        get_activation("""tanh""")
        with self.assertRaises(KeyError):
            get_activation("""bogus""")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self) -> None:
        act1 = get_activation("""gelu""")
        act1.a = 1
        act2 = get_activation("""gelu""")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 63 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None, ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''} )
    train_file: str = field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(
        f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        f'''16-bits training: {training_args.fp16}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(f''' {key} = {value}''')
                writer.write(f'''{key} = {value}\n''')
        results.update(result)
    return results
if __name__ == "__main__":
main()
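# Example invocation (editor's sketch, not part of the original script; the
# file names are placeholders for CSVs whose label lives in column 0):
#   python run_tf_text_classification.py --model_name_or_path bert-base-uncased \
#       --label_column_id 0 --train_file train.csv --dev_file dev.csv \
#       --output_dir ./model --do_train --do_eval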
| 699 | 0 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1 , int(sqrt(n) + 1) ):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1 , limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 64 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = """fnet"""

    def __init__(self, vocab_size=32_000, hidden_size=768, num_hidden_layers=12, intermediate_size=3_072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
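# Editor's sketch (not in the original file): typical use once transformers is
# installed, e.g. `FNetConfig(vocab_size=16_000)` yields a config whose
# `model_type` is "fnet" and whose remaining fields keep the defaults above.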
| 65 |
def actual_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
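    # Editor's sketch (not in the original file): spot-check both branches.
    assert power(2, 3) == 8
    assert power(-2, -3) == -0.125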
| 699 | 0 |
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
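if __name__ == "__main__":
    # Editor's sketch (not in the original file): quick sanity check of the
    # helper above; [2, 3, -2, 4] has maximum product subarray [2, 3].
    assert max_product_subarray([2, 3, -2, 4]) == 6
    print(max_product_subarray([2, 3, -2, 4]))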
| 699 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowercase__ ( A_: str = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__UpperCAmelCase =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
__UpperCAmelCase =F'''{olid} is not a valid Open Library olid'''
raise ValueError(A_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def lowercase__ ( A_: dict ) -> dict:
"""simple docstring"""
__UpperCAmelCase ={
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
__UpperCAmelCase ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__UpperCAmelCase =[
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
__UpperCAmelCase =data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(A_ , A_ ):
__UpperCAmelCase =""", """.join(A_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print("\n".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''biogpt'''

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
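# Editor's sketch (not in the original file): `BioGptConfig()` builds the
# default "biogpt" configuration; any keyword above (e.g. `num_hidden_layers=12`)
# overrides the corresponding default before `super().__init__` stores the rest.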
| 699 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
a : Optional[int] = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
a : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
a : List[str] = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
a : Dict = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
a : List[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a : Dict = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 69 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 699 | 0 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    '''simple docstring'''
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
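    # Editor's sketch (not in the original file): purely real inputs give a
    # purely real apparent power, e.g. 100 V and 5 A at zero phase -> 500 VA.
    print(apparent_power(100, 5, 0, 0))  # (500+0j)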
| 70 |
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 699 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
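    # Editor's sketch (not in the original file): a tiny two-state chain walked
    # for 1000 steps; counts should roughly follow the transition weights.
    transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", transitions, 1000))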
| 71 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 699 | 0 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    '''simple docstring'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    '''simple docstring'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    '''simple docstring'''

    def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout: float = 0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False, ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, class_labels: Optional[torch.LongTensor] = None, ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    '''simple docstring'''

    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate='tanh')
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    '''simple docstring'''

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    '''simple docstring'''

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    '''simple docstring'''

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    '''simple docstring'''

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    '''simple docstring'''

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    '''simple docstring'''

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 699 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _snake_case ( A__ ):
def __init__( self , a = None , a = None , a = None , a = None , a = False , a = False , a = None , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = path_or_paths
SCREAMING_SNAKE_CASE = split if split or isinstance(a , a) else 'train'
SCREAMING_SNAKE_CASE = features
SCREAMING_SNAKE_CASE = cache_dir
SCREAMING_SNAKE_CASE = keep_in_memory
SCREAMING_SNAKE_CASE = streaming
SCREAMING_SNAKE_CASE = num_proc
SCREAMING_SNAKE_CASE = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class _snake_case ( A__ ):
def __init__( self , a = None , a = None , a = False , a = False , a = None , **a , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = features
SCREAMING_SNAKE_CASE = cache_dir
SCREAMING_SNAKE_CASE = keep_in_memory
SCREAMING_SNAKE_CASE = streaming
SCREAMING_SNAKE_CASE = num_proc
SCREAMING_SNAKE_CASE = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Union[Dataset, IterableDataset]:
pass
| 73 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def get_pairs(word):
    # word is a tuple of symbols; return the set of adjacent symbol pairs
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        bos_token='<s>',
        pad_token='<pad>',
        eos_token='</s>',
        unk_token='<unk>',
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''')
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # repeatedly fuse the lowest-ranked adjacent pair until no known merge remains
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = ' '.join(word)
        if word == '\n  ' + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')

        word = word.replace(' ', BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.')

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ' '.join(tokens)
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merges_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, 'w', encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return (vocab_file, merges_file)
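# A hedged sketch (names are illustrative, not from the source) of a single BPE merge
# step, the core of the `bpe` method above: find the lowest-ranked adjacent pair and
# fuse every occurrence of it in the word.
def bpe_merge_once(word: tuple, bpe_ranks: dict) -> tuple:
    pairs = set(zip(word, word[1:]))
    if not pairs:
        return word
    bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
    if bigram not in bpe_ranks:
        return word
    first, second = bigram
    new_word, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            new_word.append(first + second)
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    return tuple(new_word)

# bpe_merge_once(("l", "o", "w"), {("l", "o"): 0}) -> ('lo', 'w')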
| 699 | 0 |
class SubArray:
    """simple docstring"""

    def __init__(self, arr):
        """simple docstring"""
        # the input is a comma-separated string of numbers
        self.array = arr.split(""",""")

    def solve_sub_array(self):
        """simple docstring"""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
lowercase_ = input("""please input some numbers:""")
lowercase_ = SubArray(whole_array)
lowercase_ = array.solve_sub_array()
print(("""the results is:""", re))
| 74 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'''This example is {label}''' for label in labels], return_tensors='pt', padding='max_length', )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 699 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ):
'''simple docstring'''
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , _A , )
super().__init__(*_A , **_A )
| 75 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def rename_key(key):
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
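# A small numpy illustration (not from the source) of why the linear-layer branch above
# takes a transpose: PyTorch nn.Linear stores its weight as (out, in), while a Flax
# Dense kernel is (in, out), so the same output needs w @ x on one side and x @ w.T on
# the other.
import numpy as np

pt_weight = np.arange(12.0).reshape(4, 3)  # (out_features, in_features)
flax_kernel = pt_weight.T                  # (in_features, out_features)
x = np.ones(3)
assert np.allclose(pt_weight @ x, x @ flax_kernel)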
| 699 | 0 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 76 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ):
if components is None:
A = []
A = list(UpperCamelCase__ )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __UpperCamelCase (lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
return Vector([0] * dimension )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (isinstance(lowerCAmelCase, lowerCAmelCase ))
A = [0] * dimension
A = 1
return Vector(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : float, lowerCAmelCase : Vector, lowerCAmelCase : Vector ) -> Vector:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (isinstance(lowerCAmelCase, (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
random.seed(lowerCAmelCase )
A = [random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
A = matrix
A = w
A = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def __UpperCamelCase (lowerCAmelCase : int ) -> Matrix:
A = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Matrix:
random.seed(lowerCAmelCase )
A = [
[random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
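# A plain-Python sketch (not from the source) of the angle computation used by the
# vector class above: the dot product divided by the product of Euclidean lengths,
# optionally converted to degrees.
import math

def angle_between(a: list, b: list, deg: bool = False) -> float:
    num = sum(x * y for x, y in zip(a, b))
    den = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    ang = math.acos(num / den)
    return math.degrees(ang) if deg else ang

# angle_between([1, 0], [0, 1], deg=True) -> 90.0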
| 699 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def _UpperCamelCase ( UpperCamelCase = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(UpperCamelCase ):
__UpperCAmelCase : Any = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(UpperCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(UpperCamelCase , UpperCamelCase ).lstrip("./" )
def _UpperCamelCase ( UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
return f"{i * ' '}*" if i else "\n##"
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(UpperCamelCase ) or old_parts[i] != new_part) and new_part:
print(f"{md_prefix(UpperCamelCase )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def _UpperCamelCase ( UpperCamelCase = "." ) -> None:
"""simple docstring"""
__UpperCAmelCase : str = ""
for filepath in sorted(good_file_paths(UpperCamelCase ) ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = os.path.split(UpperCamelCase )
if filepath != old_path:
__UpperCAmelCase : Tuple = print_path(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = (filepath.count(os.sep ) + 1) if filepath else 0
__UpperCAmelCase : Any = f"{filepath}/{filename}".replace(" " , "%20" )
__UpperCAmelCase : int = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f"{md_prefix(UpperCamelCase )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(""".""")
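# A quick illustration (not from the source) of the pieces above: depth 0 yields a
# section header, deeper entries yield indented bullets.
# md_prefix(0) -> "\n##"
# md_prefix(2) -> "    *"
# print_path("", "data_structures") would print "\n## Data Structures"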
| 77 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
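# A shape-only sketch (not from the source) of the decoder cache laid out by the dummy
# input generators above: each decoder layer stores self-attention key/value tensors of
# shape (batch, num_heads, past_sequence_length, head_dim). Values below assume the
# config defaults (d_model=512, 16 decoder attention heads).
import torch

batch, num_heads, past_len, head_dim = 2, 16, 7, 512 // 16
past_key = torch.zeros(batch, num_heads, past_len, head_dim)
past_value = torch.zeros(batch, num_heads, past_len, head_dim)
past_key_values = [(past_key, past_value)]  # one (key, value) tuple per decoder layer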
| 699 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 78 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 699 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vivit"""] = [
        """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VivitModel""",
        """VivitPreTrainedModel""",
        """VivitForVideoClassification""",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 79 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
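# Expected output of multiplication_table(5, 3), for reference:
# 5 * 1 = 5
# 5 * 2 = 10
# 5 * 3 = 15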
| 699 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        """simple docstring"""
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """simple docstring"""
        logger.info("""initializing retrieval""")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="""gloo""")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        """simple docstring"""
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        """simple docstring"""
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        """simple docstring"""
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """simple docstring"""
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
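# A single-process illustration (not from the source) of the data layout in retrieve():
# rank 0 gathers every worker's query batch, retrieves once over the concatenation,
# then scatters per-rank slices back out.
import torch

world_size, n_queries, hidden = 4, 8, 16
gathered = [torch.randn(n_queries, hidden) for _ in range(world_size)]
all_queries = torch.cat(gathered)               # (world_size * n_queries, hidden)
per_rank = list(all_queries.chunk(world_size))  # one (n_queries, hidden) slice per rank
assert per_rank[0].shape == (n_queries, hidden)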
| 80 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
A , A , A = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
A = self.act(self.wi_a(UpperCamelCase__ ) )
A = self.wi_a(UpperCamelCase__ )
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
A = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
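# A functional restatement (not from the source) of the scale-only RMS layer norm
# above: divide by the root-mean-square over the last dimension, with float32
# accumulation so half-precision inputs stay numerically stable.
import torch

def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x.to(torch.float32) * torch.rsqrt(variance + eps)).to(x.dtype)

# rms_norm(torch.randn(2, 4), torch.ones(4)).shape == torch.Size([2, 4])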
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
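# A minimal FiLM sketch (not from the source): a conditioning embedding predicts a
# per-channel (scale, shift) pair that is applied as x * (1 + scale) + shift, the same
# pattern as the FiLM layer above.
import torch
import torch.nn as nn

features, cond_dim = 6, 8
scale_bias = nn.Linear(cond_dim, features * 2, bias=False)
emb = torch.randn(2, cond_dim)              # conditioning embedding
scale, shift = scale_bias(emb).chunk(2, dim=-1)
x = torch.randn(2, features)
out = x * (1 + scale) + shift               # same shape as x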
| 699 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class a (unittest.TestCase ):
"""simple docstring"""
@slow
def __snake_case ( self : int ) -> Optional[Any]:
__snake_case : List[str] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
__snake_case : str = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
__snake_case : str = model(lowerCamelCase )["last_hidden_state"]
__snake_case : Union[str, Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowerCamelCase )
# compare the actual values for a slice.
__snake_case : Tuple = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 81 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    """Embeds the invisible Stable Diffusion XL watermark bits into decoded images."""

    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
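# --- Added sketch (not part of the original file): watermarking a batch of decoded
# images in [-1, 1]. Images narrower than 256 px are returned unchanged by design.
if __name__ == "__main__":
    watermarker = StableDiffusionXLWatermarker()
    images = torch.rand(1, 3, 512, 512) * 2 - 1
    marked = watermarker.apply_watermark(images)
    print(marked.shape, float(marked.min()) >= -1.0, float(marked.max()) <= 1.0)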
| 699 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Configuration for the ESMFold head."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Configuration for the ESMFold trunk."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Configuration for the ESMFold structure module (arguments follow OpenFold's StructureModule)."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
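# --- Added sketch (not part of the original file): constructing this config for a
# folding model, exercising the default EsmFoldConfig/vocab fallbacks implemented
# above. The printed value assumes the TrunkConfig default shown in this module.
if __name__ == "__main__":
    config = EsmConfig(hidden_size=768, num_hidden_layers=12, is_folding_model=True)
    print(config.to_dict()["esmfold_config"]["trunk"]["num_blocks"])  # 48, the TrunkConfig default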
| 82 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""  # kept from the original script, which ignores the supplied dict_path here
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
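# Added usage note (script name assumed from transformers conventions; paths are
# placeholders). The flags below are exactly the ones defined by the parser above:
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --config_path /path/to/config.json
# Pass --not_finetuned to convert a pretraining-only checkpoint.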
| 699 | 0 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # An iterable dataset with random length, used to test sharding edge cases.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class __snake_case ( unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Tuple = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : str = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : List[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
_lowerCamelCase : Any = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=False ):
"""simple docstring"""
random.seed(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
IterableDatasetShard(
__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Union[str, Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__lowerCAmelCase )
iterable_dataset_lists.append(list(__lowerCAmelCase ) )
_lowerCamelCase : Dict = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCamelCase : Union[str, Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
_lowerCamelCase : List[str] = []
for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
reference += reference
self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 4_2
_lowerCamelCase : Any = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
# Edge case with a very small dataset
_lowerCamelCase : Optional[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = SkipBatchSampler(__lowerCAmelCase , 2 )
self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = DataLoader(list(range(1_6 ) ) , batch_size=4 )
_lowerCamelCase : str = skip_first_batches(__lowerCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
Accelerator()
_lowerCamelCase : Any = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
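# --- Added sketch (not part of the original file): what BatchSamplerShard does for two
# processes, mirroring the first expected values asserted above.
if __name__ == "__main__":
    sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
    print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]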
| 83 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """
    Linked-list-based stack (LIFO).

    >>> stack = LinkedStack()
    >>> stack.is_empty()
    True
    >>> stack.push(5)
    >>> stack.push(9)
    >>> len(stack)
    2
    >>> stack.pop()
    9
    """

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
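# --- Added usage example (not part of the original file) ---
if __name__ == "__main__":
    stack = LinkedStack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.pop())   # 3 -- LIFO order
    print(stack.peek())  # 2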
| 699 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = ["""pixel_values"""]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = None , snake_case = "" , **snake_case , ):
super().__init__(**snake_case )
lowercase = size if size is not None else {'height': 224, 'width': 224}
lowercase = get_size_dict(snake_case )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = apply_ocr
lowercase = ocr_lang
lowercase = tesseract_config
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase = (size['height'], size['width'])
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(snake_case )
lowercase = resample if resample is not None else self.resample
lowercase = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(snake_case ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
lowercase = []
lowercase = []
for image in images:
lowercase , lowercase = apply_tesseract(snake_case , snake_case , snake_case )
words_batch.append(snake_case )
boxes_batch.append(snake_case )
if do_resize:
lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase = [flip_channel_order(snake_case ) for image in images]
lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=snake_case )
if apply_ocr:
lowercase = words_batch
lowercase = boxes_batch
return data
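# --- Added sketch (not part of the original file): typical use of the image processor
# above, via its upstream name LayoutLMv2ImageProcessor. apply_ocr=False skips the
# pytesseract dependency; with apply_ocr=True the OCR words and boxes are attached too.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv2ImageProcessor  # upstream equivalent of the class defined above

    processor = LayoutLMv2ImageProcessor(apply_ocr=False)
    encoding = processor(Image.new("RGB", (640, 480), "white"), return_tensors="np")
    print(encoding["pixel_values"].shape)  # (1, 3, 224, 224) after the default 224x224 resize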
| 84 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from this node with both players playing optimally."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
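# Added worked example: for the eight leaf scores above, height = log2(8) = 3 and the
# players alternate by depth (max at depths 0 and 2, min at depth 1):
#   depth 2 (max of leaf pairs): 90, 33, 65, 34423
#   depth 1 (min):               33,        65
#   depth 0 (max):               65   ->  "Optimal value : 65"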
| 699 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self : str , a_ : int , a_ : int , a_ : Optional[int] = None , a_ : int = 5_0257 , a_ : int = 1024 , a_ : int = 768 , a_ : int = 12 , a_ : int = 12 , a_ : Optional[int] = None , a_ : str = "gelu_new" , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 1e-5 , a_ : float = 0.02 , a_ : bool = True , a_ : bool = True , a_ : bool = False , a_ : bool = False , )-> Optional[int]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
F''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE__ : str = prefix_hidden_dim
SCREAMING_SNAKE_CASE__ : str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE__ : List[Any] = (
nn.Linear(self.prefix_hidden_dim , a_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE__ : List[str] = GPTaConfig(
vocab_size=a_ , n_positions=a_ , n_embd=a_ , n_layer=a_ , n_head=a_ , n_inner=a_ , activation_function=a_ , resid_pdrop=a_ , embd_pdrop=a_ , attn_pdrop=a_ , layer_norm_epsilon=a_ , initializer_range=a_ , scale_attn_weights=a_ , use_cache=a_ , scale_attn_by_inverse_layer_idx=a_ , reorder_and_upcast_attn=a_ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaLMHeadModel(a_ )
def __lowercase( self : Any , a_ : torch.Tensor , a_ : torch.Tensor , a_ : Optional[torch.Tensor] = None , a_ : Optional[torch.Tensor] = None , )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.transformer.transformer.wte(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.encode_prefix(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decode_prefix(a_ )
SCREAMING_SNAKE_CASE__ : Any = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE__ : Any = self.transformer(inputs_embeds=a_ , labels=a_ , attention_mask=a_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowercase( self : str , a_ : int , a_ : torch.device )-> torch.Tensor:
"""simple docstring"""
return torch.zeros(a_ , self.prefix_length , dtype=torch.intaa , device=a_ )
def __lowercase( self : str , a_ : int )-> Any:
"""simple docstring"""
return self.encode_prefix(a_ )
@torch.no_grad()
def __lowercase( self : List[Any] , a_ : Tuple , a_ : int , a_ : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = torch.split(a_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : Dict = []
for feature in features:
SCREAMING_SNAKE_CASE__ : Dict = self.decode_prefix(feature.to(a_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.generate_beam(
input_embeds=a_ , device=a_ , eos_token_id=a_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.stack(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.stack(a_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowercase( self : Tuple , a_ : Optional[int]=None , a_ : Optional[Any]=None , a_ : str=None , a_ : int = 5 , a_ : int = 67 , a_ : float = 1.0 , a_ : Optional[int] = None , )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(a_ , device=a_ , dtype=torch.int )
SCREAMING_SNAKE_CASE__ : List[str] = torch.zeros(a_ , device=a_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE__ : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.transformer.transformer.wte(a_ )
for i in range(a_ ):
SCREAMING_SNAKE_CASE__ : Dict = self.transformer(inputs_embeds=a_ )
SCREAMING_SNAKE_CASE__ : int = outputs.logits
SCREAMING_SNAKE_CASE__ : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE__ : Tuple = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = logits.topk(a_ , -1 )
SCREAMING_SNAKE_CASE__ : List[str] = generated.expand(a_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE__ : Dict = next_tokens
else:
SCREAMING_SNAKE_CASE__ : Dict = tokens.expand(a_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE__ : List[str] = -float(np.inf )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Any = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE__ : Dict = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = scores_sum_average.view(-1 ).topk(a_ , -1 )
SCREAMING_SNAKE_CASE__ : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE__ : str = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE__ : Any = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE__ : int = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE__ : Any = generated[next_tokens_source]
SCREAMING_SNAKE_CASE__ : List[str] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE__ : int = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE__ : int = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE__ : Tuple = is_stopped + next_tokens.eq(a_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE__ : Dict = scores / seq_lengths
SCREAMING_SNAKE_CASE__ : List[Any] = scores.argsort(descending=a_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE__ : Any = torch.stack(a_ , dim=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
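# --- Added sketch (not part of the original file): the length normalisation used by
# generate_beam above. Cumulative log-probabilities are divided by the number of
# generated tokens before ranking, so longer beams are not penalised for length alone.
if __name__ == "__main__":
    scores_sum = torch.tensor([[-4.0, -9.0], [-3.0, -2.0]])  # cumulative log-probs per beam
    seq_lengths = torch.tensor([2.0, 3.0])                   # tokens generated so far
    print(scores_sum / seq_lengths[:, None])                 # tensor([[-2.0000, -4.5000], [-1.0000, -0.6667]])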
| 85 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int, lowerCAmelCase : Optional[int] = None, ) -> Dict:
A = {}
if train_file is not None:
A = [train_file]
if eval_file is not None:
A = [eval_file]
if test_file is not None:
A = [test_file]
A = datasets.load_dataset('csv', data_files=lowerCAmelCase )
A = list(ds[list(files.keys() )[0]].features.keys() )
A = features_name.pop(lowerCAmelCase )
A = list(set(ds[list(files.keys() )[0]][label_name] ) )
A = {label: i for i, label in enumerate(lowerCAmelCase )}
A = tokenizer.model_input_names
A = {}
if len(lowerCAmelCase ) == 1:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length' ), batched=lowerCAmelCase, )
elif len(lowerCAmelCase ) == 2:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length', ), batched=lowerCAmelCase, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
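# --- Added sketch (not part of the original file): the tf.data.Dataset.from_generator
# contract used repeatedly above, reduced to a single feature so the pairing of
# output_types with output_shapes is easy to see.
def _demo_from_generator():
    def gen():
        for i in range(4):
            yield {"input_ids": [i, i + 1]}, i % 2

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )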
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def __UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
A , A , A , A = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=lowerCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(lowerCAmelCase ), labelaid=lowerCAmelCase, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=lowerCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(lowerCAmelCase : EvalPrediction ) -> Dict:
A = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A = TFTrainer(
model=lowerCAmelCase, args=lowerCAmelCase, train_dataset=lowerCAmelCase, eval_dataset=lowerCAmelCase, compute_metrics=lowerCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(lowerCAmelCase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(lowerCAmelCase )
return results
if __name__ == "__main__":
main()
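# Added usage note (file name and paths are placeholders; the flags come from the
# dataclasses above plus TFTrainingArguments):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --label_column_id 0 \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --output_dir ./out --do_train --do_eval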
| 699 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of the decoding method: the decoded sample from the last layer of the model."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps encoder outputs to the nearest
    entry of a learned codebook."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)

    def sample(self, generator: Optional[torch.Generator] = None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
| 86 |
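# Editor's note (illustration, not part of the original module): a quick smoke test for
# the Encoder/Decoder pair above, run from within the same file. The shapes and
# hyperparameters below are made up for illustration only.
import torch

encoder = Encoder(in_channels=3, out_channels=4, block_out_channels=(32,), double_z=True)
decoder = Decoder(in_channels=4, out_channels=3, block_out_channels=(32,))

x = torch.randn(1, 3, 32, 32)
moments = encoder(x)                                # (1, 8, 32, 32): mean and logvar stacked
posterior = DiagonalGaussianDistribution(moments)
z = posterior.sample()                              # (1, 4, 32, 32)
recon = decoder(z)
assert recon.shape == x.shape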
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50_432, hidden_size=6_144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24_576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10_000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!")

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 87 |
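# Editor's note (illustration, not part of the original file): the `rope_scaling`
# validation above in action, assuming GPTNeoXConfig is importable from the module.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] == 2.0

try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # `rope_scaling`'s type field must be one of ['linear', 'dynamic']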
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation for a non-negative exponent b."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # compute once instead of twice
    if (b % 2) == 0:
        return half * half
    else:
        return a * half * half


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, -b)  # negate so the recursion sees a non-negative exponent
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
| 699 | 0 |
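# Editor's note (illustration, not in the original): quick checks for the
# divide-and-conquer power functions above.
assert power(2, 10) == 1024
assert power(5, 0) == 1
assert power(2, -3) == 0.125
assert power(-2, -3) == -0.125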
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Multiplies two polynomials via the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete Fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B pointwise, then invert to recover A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Show A, B and A*B when printed
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
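# Editor's note (illustration, not in the original): multiplying
# (1 + 2x + 3x^2) by (4 + 5x) with the FFT class above; the product
# should equal 4 + 13x + 22x^2 + 15x^3.
fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
print(fft)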
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 699 | 0 |
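# Editor's note (illustration, not in the original): checks for max_product_subarray.
assert max_product_subarray([2, 3, -2, 4]) == 6    # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-2, -3, 4]) == 24     # the whole array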
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # the mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 89 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts a directory of CNN/DailyMail story files as a PyTorch dataset."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt a token sequence to the block size: truncate or right-pad with pad_token_id."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings alternate 0/1 at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 90 |
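# Editor's note (illustration, not in the original): process_story on a tiny in-memory
# CNN/DailyMail-style story; missing periods are added and the highlight becomes summary.
raw = "First sentence\nSecond sentence\n@highlight\nThe summary"
story, summary = process_story(raw)
assert story == ["First sentence.", "Second sentence."]
assert summary == ["The summary."]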
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 699 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 91 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 699 | 0 |
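# Editor's note (illustration, not in the original): for the dimension vector
# [10, 20, 30] the single product A1*A2 costs 10*20*30 = 6000 scalar multiplications.
m, s = matrix_chain_order([10, 20, 30])
assert m[1][2] == 6000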
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 92 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Check primality by trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below max_prime that are a difference of consecutive cubes,
    (n + 1)**3 - n**3 = 3n^2 + 3n + 1 (the so-called cuban primes)."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 699 | 0 |
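# Editor's note (illustration, not in the original): the candidates tested above are the
# cuban primes (n + 1)**3 - n**3 = 7, 19, 37, 61, 127, ... so solution(62) counts four.
assert solution(62) == 4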
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__A = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__A = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
__A = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
__A = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
__A = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
__A = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 93 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 699 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into the transformers VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 94 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""A basic Transformer block."""

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
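# Minimal self-contained sketch (added for illustration) of the feed-forward
# chunking used in BasicTransformerBlock.forward above: the MLP acts on every
# sequence position independently, so applying it chunk-by-chunk along the
# sequence dimension and concatenating matches a single full pass.
def _demo_feed_forward_chunking():
    ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
    x = torch.randn(2, 128, 64)  # (batch, seq, dim)
    chunked = torch.cat([ff(chunk) for chunk in x.chunk(4, dim=1)], dim=1)
    assert torch.allclose(chunked, ff(x), atol=1e-5)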
class FeedForward(nn.Module):
    r"""A feed-forward layer."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation function with tanh approximation support via `approximate="tanh"`."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The approximate form of the Gaussian Error Linear Unit (GELU), see https://arxiv.org/abs/1606.08415."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero)."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
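# Illustrative sketch (added): the FiLM-style modulation shared by the adaptive
# norm layers above -- normalize, then apply a per-channel scale/shift derived
# from a conditioning embedding.
def _demo_ada_group_norm():
    to_scale_shift = nn.Linear(32, 2 * 8)  # embedding_dim=32, out_dim=8 channels
    emb = to_scale_shift(torch.randn(2, 32))[:, :, None, None]
    scale, shift = emb.chunk(2, dim=1)
    x = torch.randn(2, 8, 4, 4)
    return F.group_norm(x, 2, eps=1e-5) * (1 + scale) + shift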
| 699 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
UpperCAmelCase_ : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase_ : Dict = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
UpperCAmelCase_ : Dict = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
UpperCAmelCase_ : int = pipeline("zero-shot-object-detection" )
UpperCAmelCase_ : Any = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
UpperCAmelCase_ : List[str] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
UpperCAmelCase_ : Optional[int] = 0.2
UpperCAmelCase_ : Any = pipeline("zero-shot-object-detection" )
UpperCAmelCase_ : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase_ : Dict = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
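# Hedged standalone sketch (added, not a test): the minimal pipeline call the
# cases above exercise. Assumes network access to fetch the tiny test checkpoint.
def _demo_zero_shot_object_detection():
    detector = pipeline(
        "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
    )
    return detector(
        "./tests/fixtures/tests_samples/COCO/000000039769.png",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.0,
    )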
| 95 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Returns the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")

            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text, **kwargs):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an index (integer) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a list of output tokens into a single string."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
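# Illustrative sketch (added): how `convert_tokens_to_string` above undoes the
# "@@ " continuation markers that BPE inserts between sub-word pieces.
def _demo_bpe_detokenization():
    tokens = ["hel@@", "lo", "wor@@", "ld"]
    string = " ".join(tokens)                       # "hel@@ lo wor@@ ld"
    return "".join(string.split(BPE_TOKEN_VOCAB))   # "hello world"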
| 699 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__lowerCamelCase = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
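# Hedged usage sketch (added): the composite tokenizer routes plain calls to the
# question-encoder tokenizer. Assumes network access to the "facebook/rag-token-nq"
# checkpoint on the Hub.
def _demo_rag_tokenizer():
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    return tokenizer("who holds the record in 100m freestyle", return_tensors="pt")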
| 96 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
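# Illustrative sketch (added): the decode step above with fake logits. The
# entailment column index (2) mirrors `logits[:, 2]`; real usage derives it from
# the model config in `setup`.
def _demo_entailment_decode():
    labels = ["positive", "negative", "neutral"]
    logits = torch.randn(len(labels), 3)  # one NLI pass per candidate label
    return labels[torch.argmax(logits[:, 2]).item()]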
| 699 | 0 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square; math.isqrt avoids the
    floating-point rounding issues of math.sqrt for large inputs."""
    return math.isqrt(num) ** 2 == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search.
    Time complexity: O(log n), space complexity: O(1)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
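    # added: quick cross-check of the two implementations above
    for n in (0, 1, 16, 26, 10**12, 10**12 + 1):
        assert perfect_square(n) == perfect_square_binary_search(n)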
doctest.testmod()
| 97 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
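# Tiny illustration (added) of the renaming above: indexed sub-module names like
# "layers.0" become "layers_0", so the later "." split yields Flax-style keys.
def _demo_rename_key():
    return rename_key("down_blocks.0.attentions.1.proj.weight")
    # -> "down_blocks_0.attentions_1.proj.weight"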
| 699 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
'''simple docstring'''
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : List[Any] ):
return len(self.__components )
    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    # creates a vector of dimension 'dimension' with all components set to 0
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (indexing at 0)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
'''simple docstring'''
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def square_zero_matrix(n: int) -> Matrix:
    # returns a square zero-matrix of dimension n x n
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # returns a random matrix of size width x height with integer components between 'a' and 'b'
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
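# Small smoke test (added) for the pieces repaired above; it deliberately sticks
# to the constructors, __str__/__len__ and the module-level helpers.
def _demo_lib():
    u = zero_vector(3)
    e1 = unit_basis_vector(3, 0)
    print(u, e1, len(e1))         # (0,0,0) (1,0,0) 3
    print(square_zero_matrix(2))  # |0,0| on two rows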
| 699 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations by rotating the first element to the back recursively."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of the given list via in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
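    # added: cross-check the backtracking version against the standard library
    from itertools import permutations
    assert sorted(permute2([1, 2, 3])) == sorted(map(list, permutations([1, 2, 3])))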
doctest.testmod()
| 99 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
'''simple docstring'''
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
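# Illustrative sketch (added): the `attribute_map` declared on the config above
# aliases common attribute names onto the BlenderbotSmall-specific ones.
def _demo_attribute_map():
    config = BlenderbotSmallConfig(d_model=256)
    assert config.hidden_size == 256  # aliased to d_model
    assert config.num_attention_heads == config.encoder_attention_heads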
| 699 | 0 |
import requests
_A : str = """YOUR API KEY"""
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = giphy_api_key ) -> list:
SCREAMING_SNAKE_CASE__ = '''+'''.join(query.split() )
SCREAMING_SNAKE_CASE__ = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
SCREAMING_SNAKE_CASE__ = requests.get(lowerCAmelCase_ ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 100 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
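# Hedged usage sketch (added): pairing the tokenizer and image processor through
# the processor. Assumes network access to the "BridgeTower/bridgetower-base"
# checkpoint on the Hub.
def _demo_bridgetower_processor():
    from PIL import Image
    from transformers import BridgeTowerProcessor as HubProcessor  # released class

    processor = HubProcessor.from_pretrained("BridgeTower/bridgetower-base")
    image = Image.new("RGB", (384, 384))
    return processor(image, text="a photo of a cat", return_tensors="pt")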
| 699 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort the left half and right half individually, then merge them in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input list using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] =input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
lowerCAmelCase__ : Dict =[]
else:
lowerCAmelCase__ : Optional[int] =[int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
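
# Worked example of the routine above: the pass width p doubles each iteration
# (2, 4, 8, ...), so
#   iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) -> [1, 2, 5, 7, 7, 8, 9]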
| 101 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
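
# Example: multiplication_table(number=3, number_of_terms=2) returns the
# two-line string "3 * 1 = 3\n3 * 2 = 6".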
| 699 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
        with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""" )

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self, tokenizer):
        input_text = """This is a là test"""
        output_text = """This is a<unk><unk> test"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = """This is a là test"""
        bpe_tokens = """▁This ▁is ▁a ▁l à ▁t est""".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
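
# To run just this file (path follows the usual transformers test layout, assumed):
#   python -m pytest tests/models/bartpho/test_tokenization_bartpho.py -k BartphoTokenizerTest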
| 102 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
        batch, _, _ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class DecoderLayer(nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class TaLayerCrossAttention(nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class TaLayerFFCond(nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class TaDenseGatedActDense(nn.Module ):
'''simple docstring'''
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated GELU needs two distinct projections; the original body mistakenly
        # routed the input through the same layer twice
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm(nn.Module ):
'''simple docstring'''
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
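# In equation form (illustrative): RMSNorm computes y = w * x / sqrt(mean(x**2) + eps),
# i.e. unlike standard LayerNorm there is no mean subtraction and no bias term.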
class NewGELUActivation(nn.Module ):
'''simple docstring'''
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer(nn.Module ):
'''simple docstring'''
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb )
        scale, shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
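
# Quick FiLM sanity check (illustrative sizes, not part of the original module):
#   film = TaFiLMLayer(in_features=32, out_features=8)
#   x, cond = torch.randn(1, 3, 8), torch.randn(1, 1, 32)
#   film(x, cond).shape  # -> torch.Size([1, 3, 8]); scale/shift keep the feature shape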
| 699 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers."""
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
def __eq__( self : Optional[int] , __lowerCamelCase : List[str] ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Dict , __lowerCamelCase : Tuple ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Dict , __lowerCamelCase : Tuple ):
"""simple docstring"""
return sym in self.indices
@classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f )
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Parses a fairseq dictionary file and adds its symbols to this instance."""
        if isinstance(f , str ):
            try:
                # recurse once with the opened file handle, not with the path again
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f ) )
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines )

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(''' ''' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(''' ''' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def rewrite_dict_keys(d ) -> dict:
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(R'''@@$''' , '''''' , k ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , k ), v) for k, v in d.items() )
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
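
# Key mapping applied above, using the docstring's toy example:
#   {"le@@": 5, "tt@@": 6, "er": 7}  ->  {"le": 5, "tt": 6, "er</w>": 7}
# (on a real fairseq dict the four special tokens are additionally restored afterwards)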
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path ) -> None:
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f"""Writing results to {pytorch_dump_folder_path}""" )

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , '''checkpoint.pt''' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location='''cpu''' )

    args = chkpt['''cfg''']['''model''']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , '''dict.txt''' )
    if not os.path.isfile(dict_file ):
        raise ValueError(f"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''vocab_file'''] )
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , '''bpecodes''' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )

    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''merges_file'''] )
    shutil.copyfile(bpecodes_file , merges_file )

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , '''config.json''' )

    model_conf = {
        '''activation_dropout''': args['''activation_dropout'''],
        '''architectures''': ['''BioGptForCausalLM'''],
        '''attention_probs_dropout_prob''': args['''attention_dropout'''],
        '''bos_token_id''': 0,
        '''eos_token_id''': 2,
        '''hidden_act''': args['''activation_fn'''],
        '''hidden_dropout_prob''': args['''dropout'''],
        '''hidden_size''': args['''decoder_embed_dim'''],
        '''initializer_range''': 0.02,
        '''intermediate_size''': args['''decoder_ffn_embed_dim'''],
        '''layer_norm_eps''': 1E-12,
        '''layerdrop''': args['''decoder_layerdrop'''],
        '''max_position_embeddings''': args['''max_target_positions'''],
        '''model_type''': '''biogpt''',
        '''num_attention_heads''': args['''decoder_attention_heads'''],
        '''num_hidden_layers''': args['''decoder_layers'''],
        '''pad_token_id''': 1,
        '''scale_embedding''': not args['''no_scale_embedding'''],
        '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
        '''vocab_size''': src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"""Generating {biogpt_model_config_file}""" )
    with open(biogpt_model_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )

    tokenizer_conf = {
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        '''model_max_length''': 1024,
        '''pad_token''': '''<pad>''',
        '''special_tokens_map_file''': None,
        '''tokenizer_class''': '''BioGptTokenizer''',
        '''unk_token''': '''<unk>''',
    }

    print(f"""Generating {biogpt_tokenizer_config_file}""" )
    with open(biogpt_tokenizer_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )

    # model
    model_state_dict = chkpt['''model''']

    # remove unneeded keys
    ignore_keys = [
        '''decoder.version''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )

    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('''output_projection.weight''' ):
            model_state_dict['''output_projection.weight'''] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace('''decoder''' , '''biogpt''' )] = model_state_dict.pop(layer_name )

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )

    # check that it loads ok
    model_new.load_state_dict(model_state_dict )

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f"""Generating {pytorch_weights_dump_path}""" )
    torch.save(model_state_dict , pytorch_weights_dump_path )

    print('''Conversion is done!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
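
# Example invocation (script filename and paths are hypothetical):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt/checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir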
| 103 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
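
# Usage sketch (shapes are illustrative): NCHW images in [-1, 1] with side >= 256:
#   wm = StableDiffusionXLWatermarker()
#   out = wm.apply_watermark(torch.zeros(1, 3, 256, 256))
#   out.shape  # -> torch.Size([1, 3, 256, 256]); same range, watermark bits embedded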
| 699 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str ) -> bool:
    n = str(n )
    return n == n[::-1]


def solution(limit: int = 1000000 ) -> int:
    total = 0
    for i in range(1, limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total
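
# Worked example: 585 is a palindrome in base 10 and in base 2
# (585 = 0b1001001001), so both is_palindrome checks hold and 585
# contributes to the total.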
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 104 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type ) -> None:
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model ) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm ) -> None:
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ) -> None:
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()

    dict_path = ''

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
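
# Example invocation (script filename and paths are hypothetical):
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir --not_finetuned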
| 699 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ['''bert-base-uncased''', '''bert-base-cased''']
TINY_MODEL_CHECKPOINT = '''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
    class ModelToSave(tf.keras.Model ):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )

        def call(self, inputs):
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase ):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint ,use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
    def test_output_equivalence(self):
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs ,return_tensors='tf' ,padding='longest' )
                tf_outputs = tf_tokenizer(test_inputs )

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] ,tf.int64 ) == tf_outputs[key] ) )
@slow
    def test_merged_pair_equivalence(self):
for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] ,text_pair=[sentence[1] for sentence in self.paired_sentences] ,)
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] ,tf.int64 ) == separated_outputs[key] ) )
@slow
    def test_graph_mode(self):
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model(self):
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs )  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )

                loaded_output = loaded_model(test_inputs )
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) ,1E-5 )
| 105 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T] ):
    def __init__(self, data: T):
        self.data = data  # the value stored in this node
        self.next: Node[T] | None = None  # reference to the next node

    def __str__(self):
        return f'''{self.data}'''


class LinkedStack(Generic[T] ):
    """Stack implemented on top of a singly linked list."""

    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item ) for item in self] )

    def __len__(self):
        return len(tuple(iter(self ) ) )

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
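
# Usage sketch:
#   stack = LinkedStack[int]()
#   stack.push(1); stack.push(2)
#   str(stack)    # -> "2->1"
#   stack.pop()   # -> 2
#   stack.peek()  # -> 1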
| 699 | 0 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImgaImgPipelineFastTests(PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests(PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(m):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )

        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )

        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        controlnet = MultiControlNetModel([controlnet1, controlnet2] )

        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )

        controlnet_embedder_scale_factor = 2

        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]

        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )

        scale = 1_0.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_1 = pipe(**inputs )[0]

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImgaImgPipelineSlowTests(unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )

        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )

        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )

        assert np.abs(expected_image - image ).max() < 9e-2
| 106 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ), 2 )
    print(f'''Optimal value : {minimax(0, 0, True, scores, height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
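
# Worked example: for scores [3, 5] and height 1, the maximizer at the root picks
# max(minimax(1, 0, False, ...), minimax(1, 1, False, ...)) = max(3, 5) = 5.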
| 699 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_UpperCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline ):
"""simple docstring"""
    def __init__( self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
        super().__init__()
if hasattr(scheduler.config, 'steps_offset' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
                ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
                ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
                ' file'
            )
            deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['steps_offset'] = 1
            scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config, 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
                ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
                ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
                ' Hub, it would be very nice if you could open a Pull request for the'
                ' `scheduler/scheduler_config.json` file'
            )
            deprecate('skip_prk_steps not set', '1.0.0', deprecation_message, standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['skip_prk_steps'] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing( self, slice_size: Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )

        device = torch.device('cuda' )

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
if self.device != torch.device('meta' ) or not hasattr(self.unet, '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__, '_hf_hook' )
and hasattr(module._hf_hook, 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 5_12, width: int = 5_12, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding='max_length', return_tensors='pt' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
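
# Usage sketch for this community pipeline (model ids are illustrative):
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting", custom_pipeline="text_inpainting",
#       segmentation_model=model, segmentation_processor=processor)
#   result = pipe(image=init_image, text="a glass", prompt="a cup of coffee")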
| 107 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None, ):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('csv', data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length' ), batched=True, )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = field(metadata={'''help''': '''Which column contains the label'''} )
SCREAMING_SNAKE_CASE : str = field(default=__lowercase , metadata={'''help''': '''The path of the training file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the development file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the test file'''} )
SCREAMING_SNAKE_CASE : int = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
A , A , A , A = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=lowerCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(lowerCAmelCase ), labelaid=lowerCAmelCase, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=lowerCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(lowerCAmelCase : EvalPrediction ) -> Dict:
A = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A = TFTrainer(
model=lowerCAmelCase, args=lowerCAmelCase, train_dataset=lowerCAmelCase, eval_dataset=lowerCAmelCase, compute_metrics=lowerCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(lowerCAmelCase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(lowerCAmelCase )
return results
if __name__ == "__main__":
main()
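# A hypothetical invocation (the script name, file paths, and model checkpoint are placeholders):
# python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --label_column_id 0 \
#     --train_file ./train.csv \
#     --dev_file ./dev.csv \
#     --max_seq_length 128 \
#     --output_dir ./model_out \
#     --do_train --do_eval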
| 699 | 0 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> list[str]:
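    """Split ``number_of_bytes`` into ``partitions`` contiguous, 1-indexed byte ranges.

    Illustrative doctests (a sketch of the intended behavior):

    >>> _SCREAMING_SNAKE_CASE(16, 4)
    ['1-4', '5-8', '9-12', '13-16']
    >>> _SCREAMING_SNAKE_CASE(16, 0)
    Traceback (most recent call last):
        ...
    ValueError: partitions must be a positive number!
    """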
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
_UpperCAmelCase = number_of_bytes // partitions
_UpperCAmelCase = []
for i in range(__snake_case ):
_UpperCAmelCase = i * bytes_per_partition + 1
_UpperCAmelCase = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 108 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = CLIPConfig()
            # Create a dummy config file with image_processor_type
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ).to_dict()
config_dict.pop("""image_processor_type""" )
__SCREAMING_SNAKE_CASE = CLIPImageProcessor(**lowerCamelCase )
# save in new folder
model_config.save_pretrained(lowerCamelCase )
config.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
# make sure private variable is not incorrectly saved
__SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase ,"""clip-base is not a local folder and is not a valid model identifier""" ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ,revision="""aaaaaa""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ,trust_remote_code=lowerCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
__SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
class __a ( _snake_case ):
__UpperCamelCase : int = True
try:
AutoConfig.register("""custom""" ,lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
# If remote code is not set, the default is to use local
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(not hasattr(lowerCamelCase ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
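# The core pattern exercised by the tests above, as a sketch (checkpoint taken from the first test):
# from transformers import AutoImageProcessor
# image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")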
| 109 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Optional[int]:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCAmelCase, int(b / 2 ) ) * actual_power(lowerCAmelCase, int(b / 2 ) )
else:
return a * actual_power(lowerCAmelCase, int(b / 2 ) ) * actual_power(lowerCAmelCase, int(b / 2 ) )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> float:
if b < 0:
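        # int(b / 2) truncates toward zero, so for negative b actual_power returns a ** abs(b)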
return 1 / actual_power(lowerCAmelCase, lowerCAmelCase )
return actual_power(lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
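    # Illustrative expected results (a sketch):  power(2, 3) -> 8,  power(2, -3) -> 0.125,  power(5, 0) -> 1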
| 699 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def _lowercase ( UpperCamelCase_ , UpperCamelCase_=1000 ) -> List[Any]:
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
SCREAMING_SNAKE_CASE__ = n - 1
SCREAMING_SNAKE_CASE__ = 0
    while d % 2 == 0:
        d //= 2 # integer division keeps d an int for the modular exponentiation below
        exp += 1
# n - 1=d*(2**exp)
SCREAMING_SNAKE_CASE__ = 0
while count < prec:
SCREAMING_SNAKE_CASE__ = random.randint(2 , n - 1 )
SCREAMING_SNAKE_CASE__ = bin_exp_mod(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
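        # n survives this witness unless b == a**d % n != 1 and squaring b never reaches n - 1 within exp steps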
if b != 1:
SCREAMING_SNAKE_CASE__ = True
for _ in range(UpperCamelCase_ ):
if b == n - 1:
SCREAMING_SNAKE_CASE__ = False
break
SCREAMING_SNAKE_CASE__ = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
__snake_case = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 472 |
def __UpperCamelCase (lowerCAmelCase : list[int] ) -> int:
if not numbers:
return 0
if not isinstance(lowerCAmelCase, (list, tuple) ) or not all(
isinstance(lowerCAmelCase, lowerCAmelCase ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
A = A = A = numbers[0]
for i in range(1, len(lowerCAmelCase ) ):
# update the maximum and minimum subarray products
A = numbers[i]
if number < 0:
A , A = min_till_now, max_till_now
A = max(lowerCAmelCase, max_till_now * number )
A = min(lowerCAmelCase, min_till_now * number )
# update the maximum product found till now
A = max(lowerCAmelCase, lowerCAmelCase )
return max_prod
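# Illustrative expected results (a sketch):
# [2, 3, -2, 4] -> 6 (subarray [2, 3]); [-2, 0, -1] -> 0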
| 699 | 0 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Optional[Any] = logging.get_logger(__name__)
def snake_case__ ( _lowerCamelCase ) ->Optional[int]:
"""simple docstring"""
print("Loading config file..." )
def flatten_yaml_as_dict(_lowerCamelCase, _lowerCamelCase="", _lowerCamelCase="." ):
__lowercase : str = []
for k, v in d.items():
__lowercase : str = parent_key + sep + k if parent_key else k
if isinstance(_lowerCamelCase, collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(_lowerCamelCase, _lowerCamelCase, sep=_lowerCamelCase ).items() )
else:
items.append((new_key, v) )
return dict(_lowerCamelCase )
__lowercase : List[Any] = argparse.Namespace()
with open(_lowerCamelCase, "r" ) as yaml_file:
try:
__lowercase : str = yaml.load(_lowerCamelCase, Loader=yaml.FullLoader )
__lowercase : Tuple = flatten_yaml_as_dict(_lowerCamelCase )
for k, v in flat_cfg.items():
setattr(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(_lowerCamelCase, str(_lowerCamelCase ) ) )
return config
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Optional[int]:
"""simple docstring"""
__lowercase : Any = MobileViTVaConfig()
__lowercase : List[Any] = False
# dataset
if task_name.startswith("imagenet1k_" ):
__lowercase : Optional[int] = 10_00
if int(task_name.strip().split("_" )[-1] ) == 3_84:
__lowercase : Tuple = 3_84
else:
__lowercase : List[str] = 2_56
__lowercase : int = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
__lowercase : List[str] = 2_10_00
if int(task_name.strip().split("_" )[-1] ) == 3_84:
__lowercase : Any = 3_84
else:
__lowercase : Dict = 2_56
__lowercase : List[Any] = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
__lowercase : List[str] = 1_51
__lowercase : Optional[Any] = 5_12
__lowercase : int = "ade20k-id2label.json"
__lowercase : List[Any] = True
elif task_name.startswith("voc_" ):
__lowercase : Dict = 21
__lowercase : Dict = 5_12
__lowercase : Union[str, Any] = "pascal-voc-id2label.json"
__lowercase : Any = True
# orig_config
__lowercase : Union[str, Any] = load_orig_config_file(_lowerCamelCase )
assert getattr(_lowerCamelCase, "model.classification.name", -1 ) == "mobilevit_v2", "Invalid model"
__lowercase : int = getattr(_lowerCamelCase, "model.classification.mitv2.width_multiplier", 1.0 )
assert (
getattr(_lowerCamelCase, "model.classification.mitv2.attn_norm_layer", -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
__lowercase : Optional[int] = getattr(_lowerCamelCase, "model.classification.activation.name", "swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
__lowercase : Optional[int] = getattr(_lowerCamelCase, "model.segmentation.output_stride", 16 )
if "_deeplabv3" in task_name:
__lowercase : List[Any] = getattr(_lowerCamelCase, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36] )
__lowercase : Any = getattr(_lowerCamelCase, "model.segmentation.deeplabv3.aspp_out_channels", 5_12 )
__lowercase : List[str] = getattr(_lowerCamelCase, "model.segmentation.deeplabv3.aspp_dropout", 0.1 )
# id2label
__lowercase : Dict = "huggingface/label-files"
__lowercase : Tuple = json.load(open(hf_hub_download(_lowerCamelCase, _lowerCamelCase, repo_type="dataset" ), "r" ) )
__lowercase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__lowercase : Tuple = idalabel
__lowercase : Dict = {v: k for k, v in idalabel.items()}
return config
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->List[Any]:
"""simple docstring"""
__lowercase : List[Any] = dct.pop(_lowerCamelCase )
__lowercase : Tuple = val
def snake_case__ ( _lowerCamelCase, _lowerCamelCase=False ) ->Optional[Any]:
"""simple docstring"""
if base_model:
__lowercase : Any = ""
else:
__lowercase : List[str] = "mobilevitv2."
__lowercase : List[Any] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
__lowercase : Optional[Any] = k[8:]
else:
__lowercase : List[Any] = k
if ".block." in k:
__lowercase : Union[str, Any] = k_new.replace(".block.", "." )
if ".conv." in k:
__lowercase : int = k_new.replace(".conv.", ".convolution." )
if ".norm." in k:
__lowercase : Optional[int] = k_new.replace(".norm.", ".normalization." )
if "conv_1." in k:
__lowercase : Union[str, Any] = k_new.replace("conv_1.", F'{model_prefix}conv_stem.' )
for i in [1, 2]:
if F'layer_{i}.' in k:
__lowercase : Union[str, Any] = k_new.replace(F'layer_{i}.', F'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
__lowercase : Optional[int] = k_new.replace(".exp_1x1.", ".expand_1x1." )
if ".red_1x1." in k:
__lowercase : Optional[Any] = k_new.replace(".red_1x1.", ".reduce_1x1." )
for i in [3, 4, 5]:
if F'layer_{i}.0.' in k:
__lowercase : str = k_new.replace(F'layer_{i}.0.', F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if F'layer_{i}.1.local_rep.0.' in k:
__lowercase : List[Any] = k_new.replace(F'layer_{i}.1.local_rep.0.', F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if F'layer_{i}.1.local_rep.1.' in k:
__lowercase : List[str] = k_new.replace(F'layer_{i}.1.local_rep.1.', F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
__lowercase : Optional[int] = [0, 1]
elif i == 4:
__lowercase : Dict = [0, 1, 2, 3]
elif i == 5:
__lowercase : Any = [0, 1, 2]
for j in j_in:
if F'layer_{i}.1.global_rep.{j}.' in k:
__lowercase : List[str] = k_new.replace(
F'layer_{i}.1.global_rep.{j}.', F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if F'layer_{i}.1.global_rep.{j+1}.' in k:
__lowercase : str = k_new.replace(
F'layer_{i}.1.global_rep.{j+1}.', F'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if F'layer_{i}.1.conv_proj.' in k:
__lowercase : List[Any] = k_new.replace(F'layer_{i}.1.conv_proj.', F'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
__lowercase : Any = k_new.replace("pre_norm_attn.0.", "layernorm_before." )
if "pre_norm_attn.1." in k:
__lowercase : List[Any] = k_new.replace("pre_norm_attn.1.", "attention." )
if "pre_norm_ffn.0." in k:
__lowercase : List[Any] = k_new.replace("pre_norm_ffn.0.", "layernorm_after." )
if "pre_norm_ffn.1." in k:
__lowercase : List[str] = k_new.replace("pre_norm_ffn.1.", "ffn.conv1." )
if "pre_norm_ffn.3." in k:
__lowercase : Any = k_new.replace("pre_norm_ffn.3.", "ffn.conv2." )
if "classifier.1." in k:
__lowercase : Union[str, Any] = k_new.replace("classifier.1.", "classifier." )
if "seg_head." in k:
__lowercase : Union[str, Any] = k_new.replace("seg_head.", "segmentation_head." )
if ".aspp_layer." in k:
__lowercase : Dict = k_new.replace(".aspp_layer.", "." )
if ".aspp_pool." in k:
__lowercase : List[Any] = k_new.replace(".aspp_pool.", "." )
rename_keys.append((k, k_new) )
return rename_keys
def snake_case__ ( _lowerCamelCase ) ->Dict:
"""simple docstring"""
__lowercase : List[str] = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(_lowerCamelCase )
for k in keys_to_ignore:
state_dict.pop(_lowerCamelCase, _lowerCamelCase )
def snake_case__ ( ) ->Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
__lowercase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase, stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->Dict:
"""simple docstring"""
__lowercase : Tuple = get_mobilevitva_config(_lowerCamelCase, _lowerCamelCase )
# load original state_dict
__lowercase : int = torch.load(_lowerCamelCase, map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
__lowercase : Any = MobileViTVaForSemanticSegmentation(_lowerCamelCase ).eval()
__lowercase : Optional[Any] = False
else:
__lowercase : Dict = MobileViTVaForImageClassification(_lowerCamelCase ).eval()
__lowercase : Optional[Any] = False
    # remove and rename some keys to load the original model
__lowercase : int = checkpoint
remove_unused_keys(_lowerCamelCase )
__lowercase : str = create_rename_keys(_lowerCamelCase, base_model=_lowerCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
# load modified state_dict
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
__lowercase : Tuple = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__lowercase : Optional[Any] = image_processor(images=prepare_img(), return_tensors="pt" )
__lowercase : List[str] = model(**_lowerCamelCase )
# verify classification model
if task_name.startswith("imagenet" ):
__lowercase : List[Any] = outputs.logits
__lowercase : Dict = logits.argmax(-1 ).item()
print("Predicted class:", model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
__lowercase : Union[str, Any] = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] )
assert torch.allclose(logits[0, :3], _lowerCamelCase, atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
__A : Dict = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
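# A hypothetical invocation (checkpoint and config paths are placeholders):
# python convert_mobilevitv2_to_pytorch.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0-imagenet1k-256.pt \
#     --orig_config_path ./mobilevitv2.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-out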
| 575 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
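# Minimal illustrative sketch (the checkpoint, task name, and ``examples`` are assumptions):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# features = glue_convert_examples_to_features(examples, tokenizer, max_length=128, task="mrpc")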
| 699 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __snake_case ( _lowerCAmelCase : Optional[int] ) -> Optional[int]:
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __magic_name__ ( __lowercase ):
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :ArgumentParser ):
'''simple docstring'''
A_ : Optional[Any] = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=UpperCamelCase__ , help="Name of the model to download" )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self :Union[str, Any] , snake_case :str , snake_case :str , snake_case :bool , snake_case :bool ):
'''simple docstring'''
A_ : int = model
A_ : Optional[int] = cache
A_ : List[str] = force
A_ : Any = trust_remote_code
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
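# Typical invocation through the transformers CLI (the model name is an example):
# transformers-cli download bert-base-uncased --cache-dir ./cache --force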
| 454 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''biogpt'''
def __init__( self : Optional[Any] , UpperCamelCase__ : str=42384 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Dict=1e-1_2 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Optional[Any]=2 , **UpperCamelCase__ : List[Any] , ):
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = scale_embedding
A = use_cache
A = layerdrop
A = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
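# A minimal usage sketch (keyword names follow the signature above; the values mirror its defaults):
# config = _UpperCAmelCase(vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)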
| 699 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__snake_case = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 131072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
}
def __lowerCAmelCase ( lowercase : Any , lowercase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return torch.atana(lowercase , lowercase ) / math.pi * 2
def __lowerCAmelCase ( lowercase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
snake_case : str = torch.sin(t * math.pi / 2 ) ** 2
snake_case : Optional[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowercase , lowercase )
class _lowerCAmelCase ( __lowercase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__()
snake_case : Any = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 )
snake_case : List[str] = deepcopy(self.diffusion )
snake_case : Optional[Any] = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ )
def __lowerCAmelCase ( lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = MODELS_MAP[model_name]["url"]
os.system(F'wget {url} ./' )
return F'./{model_name}.ckpt'
__snake_case = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
__snake_case = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
__snake_case = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
__snake_case = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
__snake_case = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
__snake_case = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def __lowerCAmelCase ( lowercase : Dict ) -> str:
"""simple docstring"""
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCAmelCase ( lowercase : Dict ) -> Union[str, Any]:
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(lowercase ) and not isinstance(lowercase , lowercase ):
return name.replace(lowercase , lowercase )
elif name.startswith(lowercase ):
return [name.replace(lowercase , lowercase ) for v in value]
raise ValueError(F'Attn error with {name}' )
def __lowerCAmelCase ( lowercase : List[str] , lowercase : Union[str, Any]=13 ) -> List[Any]:
"""simple docstring"""
snake_case : Dict = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
snake_case : Dict = 0
if string.startswith("net.3." ):
depth += 1
snake_case : str = string[6:]
elif string.startswith("net." ):
snake_case : Tuple = string[4:]
while string.startswith("main.7." ):
depth += 1
snake_case : Tuple = string[7:]
if string.startswith("main." ):
snake_case : str = string[5:]
# mid block
if string[:2].isdigit():
snake_case : Any = string[:2]
snake_case : Any = string[2:]
else:
snake_case : Dict = string[0]
snake_case : int = string[1:]
if depth == max_depth:
snake_case : Tuple = MID_NUM_TO_LAYER[layer_num]
snake_case : Optional[Any] = "mid_block"
elif depth > 0 and int(lowercase ) < 7:
snake_case : Optional[Any] = DOWN_NUM_TO_LAYER[layer_num]
snake_case : Optional[Any] = F'down_blocks.{depth}'
elif depth > 0 and int(lowercase ) > 7:
snake_case : List[str] = UP_NUM_TO_LAYER[layer_num]
snake_case : List[Any] = F'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
snake_case : Tuple = DEPTH_0_TO_LAYER[layer_num]
snake_case : Optional[Any] = F'up_blocks.{max_depth - 1}' if int(lowercase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.' )
snake_case : List[str] = string_left[1:]
if "resnets" in new_layer:
snake_case : Union[str, Any] = convert_resconv_naming(lowercase )
elif "attentions" in new_layer:
snake_case : List[str] = convert_attn_naming(lowercase )
snake_case : int = new_string_left
if not isinstance(lowercase , lowercase ):
snake_case : Optional[Any] = prefix + "." + new_layer + "." + string_left
else:
snake_case : Tuple = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def __lowerCAmelCase ( lowercase : List[Any] ) -> Dict:
"""simple docstring"""
snake_case : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
            # up- and downsample layers don't have trainable weights
continue
snake_case : Tuple = rename(lowercase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowercase , lowercase ):
snake_case : Union[str, Any] = transform_conv_attns(lowercase , lowercase , lowercase )
else:
snake_case : List[str] = v
return new_state_dict
def __lowerCAmelCase ( lowercase : Optional[int] , lowercase : Dict , lowercase : List[str] ) -> List[Any]:
"""simple docstring"""
if len(lowercase ) == 1:
if len(v.shape ) == 3:
# weight
snake_case : int = v[:, :, 0]
else:
# bias
snake_case : Tuple = v
else:
# qkv matrices
snake_case : Any = v.shape[0]
snake_case : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case : Optional[int] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case : Optional[Any] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __lowerCAmelCase ( lowercase : Any ) -> str:
"""simple docstring"""
snake_case : Any = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
snake_case : str = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
snake_case : List[str] = download(lowercase )
snake_case : Any = MODELS_MAP[model_name]["sample_rate"]
snake_case : Optional[Any] = MODELS_MAP[model_name]["sample_size"]
snake_case : Tuple = Object()
snake_case : Optional[Any] = sample_size
snake_case : Dict = sample_rate
snake_case : Any = 0
snake_case : List[str] = UNetaDModel(sample_size=lowercase , sample_rate=lowercase )
snake_case : Dict = diffusers_model.state_dict()
snake_case : str = DiffusionUncond(lowercase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowercase )["state_dict"] )
snake_case : List[Any] = orig_model.diffusion_ema.eval()
snake_case : List[Any] = orig_model.state_dict()
snake_case : Union[str, Any] = rename_orig_weights(lowercase )
snake_case : List[Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case : Union[str, Any] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowercase ) == 0, F'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("kernel" ) for k in list(lowercase ) ), F'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
snake_case : Tuple = value.squeeze()
snake_case : Optional[Any] = value
diffusers_model.load_state_dict(lowercase )
snake_case : Any = 100
snake_case : Any = 33
snake_case : Dict = IPNDMScheduler(num_train_timesteps=lowercase )
snake_case : Any = torch.manual_seed(lowercase )
snake_case : Optional[Any] = torch.randn([1, 2, config.sample_size] , generator=lowercase ).to(lowercase )
snake_case : int = torch.linspace(1 , 0 , steps + 1 , device=lowercase )[:-1]
snake_case : Any = get_crash_schedule(lowercase )
snake_case : Tuple = DanceDiffusionPipeline(unet=lowercase , scheduler=lowercase )
snake_case : Optional[Any] = torch.manual_seed(33 )
snake_case : Dict = pipe(num_inference_steps=lowercase , generator=lowercase ).audios
snake_case : List[Any] = sampling.iplms_sample(lowercase , lowercase , lowercase , {} )
snake_case : List[str] = generated.clamp(-1 , 1 )
snake_case : int = (generated - audio).abs().sum()
snake_case : Optional[Any] = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , lowercase )
print("Diff max" , lowercase )
assert diff_max < 1e-3, F'Diff max: {diff_max} is too much :-/'
print(F'Conversion for {model_name} successful!' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
__snake_case = parser.parse_args()
main(args)
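# A hypothetical invocation ("gwf-440k" is one of the MODELS_MAP keys above; the output path is a placeholder):
# python convert_dance_diffusion_to_diffusers.py \
#     --model_path gwf-440k \
#     --checkpoint_path ./gwf-440k-diffusers \
#     --save True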
| 178 |
import sys
def __UpperCamelCase (lowerCAmelCase : Dict ) -> Dict:
A = len(lowerCAmelCase )
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
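    # matrix[a][b] will hold the minimum multiplication cost for the chain a..b
    # (matrix i has dimensions array[i - 1] x array[i]); sol[a][b] records the best split point c.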
for chain_length in range(2, lowerCAmelCase ):
for a in range(1, n - chain_length + 1 ):
A = a + chain_length - 1
A = sys.maxsize
for c in range(lowerCAmelCase, lowerCAmelCase ):
A = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
A = cost
A = c
return matrix, sol
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any] ) -> List[str]:
if i == j:
print('A' + str(lowerCAmelCase ), end=' ' )
else:
print('(', end=' ' )
        print_optimal_solution(lowerCAmelCase, lowerCAmelCase, optimal_solution[i][j] )
        print_optimal_solution(lowerCAmelCase, optimal_solution[i][j] + 1, lowerCAmelCase )
print(')', end=' ' )
def __UpperCamelCase () -> List[str]:
A = [30, 35, 15, 5, 10, 20, 25]
A = len(lowerCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
A , A = matrix_chain_order(lowerCAmelCase )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(lowerCAmelCase, 1, n - 1 )
if __name__ == "__main__":
main()
| 699 | 0 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
A = CLIPConfig
A = ['''CLIPEncoderLayer''']
def __init__( self : str , UpperCamelCase_ : CLIPConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__(UpperCamelCase__ )
lowerCamelCase_ : List[str] = CLIPVisionModelWithProjection(config.vision_config )
lowerCamelCase_ : Union[str, Any] = nn.Linear(config.vision_config.projection_dim , 1 )
lowerCamelCase_ : Optional[int] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int]=0.5 , UpperCamelCase_ : Union[str, Any]=0.5 ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : str = self.vision_model(UpperCamelCase__ )[0]
lowerCamelCase_ : int = self.p_head(UpperCamelCase__ )
lowerCamelCase_ : Tuple = nsfw_detected.flatten()
lowerCamelCase_ : Optional[int] = nsfw_detected > p_threshold
lowerCamelCase_ : Optional[Any] = nsfw_detected.tolist()
if any(UpperCamelCase__ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase__ ):
if nsfw_detected_:
lowerCamelCase_ : Any = np.zeros(images[idx].shape )
lowerCamelCase_ : Any = self.w_head(UpperCamelCase__ )
lowerCamelCase_ : Dict = watermark_detected.flatten()
lowerCamelCase_ : str = watermark_detected > w_threshold
lowerCamelCase_ : Optional[Any] = watermark_detected.tolist()
if any(UpperCamelCase__ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCamelCase__ ):
if watermark_detected_:
lowerCamelCase_ : Optional[int] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 501 |
from math import isqrt
def __UpperCamelCase (lowerCAmelCase : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2, isqrt(lowerCAmelCase ) + 1 ) )
def __UpperCamelCase (lowerCAmelCase : int = 10**6 ) -> int:
A = 0
A = 1
A = 7
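    # 7 = 2**3 - 1**3; each candidate is a gap between consecutive cubes,
    # (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1, and moving to the next gap adds 6*(k + 1).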
while prime_candidate < max_prime:
primes_count += is_prime(lowerCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 699 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
UpperCamelCase__ = None
UpperCamelCase__ = "utf-8"
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True # deprecated
UpperCamelCase__ = None # deprecated
UpperCamelCase__ = 10 << 20 # 10MB
UpperCamelCase__ = None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ = JsonConfig
def snake_case_ ( self):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
A__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
return datasets.DatasetInfo(features=self.config.features)
def snake_case_ ( self , a__):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
A__ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(UpperCamelCase__ , (str, list, tuple)):
A__ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCamelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
A__ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCamelCase__) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={'''files''': files}))
return splits
def snake_case_ ( self , a__):
if self.config.features is not None:
# adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with"
                                            f" block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
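
# --- Illustration (not part of the loader above): a self-contained sketch of the same
# retry policy. pyarrow raises ArrowInvalid with a "straddling" message when a JSON
# object crosses a block boundary, and doubling block_size until the batch fits resolves
# it. The sample records and the deliberately tiny 32-byte starting block size are
# assumptions chosen to force at least one retry.
if __name__ == "__main__":
    import io

    import pyarrow as pa
    import pyarrow.json as paj

    batch = b'{"text": "a record that is much longer than the initial block size"}\n' * 4
    block_size = 32
    while True:
        try:
            pa_table = paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
            break
        except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
            if isinstance(e, pa.ArrowInvalid) and "straddling" not in str(e) or block_size > len(batch):
                raise
            block_size *= 2  # same growth policy as _generate_tables above
    print(f"parsed {pa_table.num_rows} rows with block_size={block_size}")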
| 632 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
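
# Usage note (illustrative): instantiating the deprecated class still works but warns, e.g.
#     feature_extractor = ImageGPTFeatureExtractor()  # emits a FutureWarning
# New code should construct ImageGPTImageProcessor directly.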
| 699 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find the root of `func` (given as a string in the variable `x`) by the
    Newton-Raphson method, starting from the point `a`."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e (root of log(x) - 1)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
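
# Reference for the loop above: each step applies the Newton-Raphson update
#     x_{n+1} = x_n - f(x_n) / f'(x_n)
# where sympy.diff supplies f' symbolically and Decimal keeps the division precise;
# iteration stops once |f(x_n)| drops below `precision`.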
| 509 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block: self-attention, optional cross-attention, and a feed-forward
    network, each preceded by its own normalization layer (pre-norm layout).
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    f" calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    r"""A feed-forward layer."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    r"""GELU activation function, with tanh approximation support via `approximate`."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU).
    For more details, see section 2 of https://arxiv.org/abs/1606.08415.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero): produces modulation chunks for both attention and MLP."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
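
# --- Smoke-test sketch (assumption: run from within the diffusers package so the
# relative imports above resolve; the shapes below are arbitrary):
#
#     block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#     sample = torch.randn(2, 77, 64)  # (batch, sequence_length, dim)
#     assert block(sample).shape == sample.shape  # residual connections preserve the shape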
| 699 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """A stack backed by a singly linked list: pushes and pops happen at the head."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
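
    # A small demonstration of the API (illustrative; mirrors typical usage):
    stack: LinkedStack[int] = LinkedStack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.peek())  # 3
    print(stack.pop())   # 3
    print(len(stack))    # 2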
| 470 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where `word` is a tuple of
    variable-length string symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")

            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an index (integer) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a list of output tokens into a single string."""
        # combine into one string
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
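
# Decoding illustration (hypothetical tokens; real vocab/merges ship with the checkpoint):
# convert_tokens_to_string(["hel@@", "lo", "wor@@", "ld"]) first joins on spaces to get
# "hel@@ lo wor@@ ld", then removes every "@@ " continuation marker, yielding "hello world".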
| 699 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
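
# Note: integration tests marked @slow are skipped by default; in the transformers
# repository they are enabled with e.g. RUN_SLOW=1 python -m pytest <this test file>.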
| 440 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
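
# Usage sketch (assumes the transformers agents runtime; the tool id may vary by version):
#
#     from transformers import load_tool
#     classifier = load_tool("text-classification")
#     print(classifier("This new tool API is great!", labels=["positive", "negative"]))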
| 699 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
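
# Standalone usage of the checkpoint exercised above (illustrative; downloads weights):
#
#     from transformers import DeiTImageProcessor, DeiTForImageClassificationWithTeacher
#     processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     print(model(**inputs).logits.argmax(-1))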
| 61 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to the corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
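
# Illustration of the two reshapes above (shape algebra only; the module's relative
# imports prevent running it directly as a script):
#     conv weight:   PyTorch (out, in, kH, kW) = (8, 3, 3, 3)
#                    -> transpose(2, 3, 1, 0) -> (3, 3, 3, 8) = Flax (kH, kW, in, out)
#     linear weight: PyTorch (out, in) = (16, 32)
#                    -> .T                  -> (32, 16)       = Flax (in, out)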
| 699 | 0 |