| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 54.1k) | int64 (0 to 699) | string (lengths 111 to 35.6k) | int64 (0 to 699) | int64 (0 to 1) |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCamelCase__ : str = TypeVar("KT")
lowerCamelCase__ : Union[str, Any] = TypeVar("VT")
class lowercase__( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCamelCase_ :KT | str = "root" , lowerCamelCase_ :VT | None = None ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = key
SCREAMING_SNAKE_CASE : Any = value
SCREAMING_SNAKE_CASE : list[Node[KT, VT]] = []
def __repr__( self :Optional[Any] ) -> str:
'''simple docstring'''
return f"Node({self.key}: {self.value})"
@property
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
return len(self.forward )
class lowercase__( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :float = 0.5 , lowerCamelCase_ :int = 16 ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Node[KT, VT] = Node[KT, VT]()
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : int = p
SCREAMING_SNAKE_CASE : Optional[Any] = max_level
def __str__( self :int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = list(self )
if len(lowerCamelCase_ ) == 0:
return f"SkipList(level={self.level})"
SCREAMING_SNAKE_CASE : str = max((len(str(lowerCamelCase_ ) ) for item in items) , default=4 )
SCREAMING_SNAKE_CASE : List[str] = max(lowerCamelCase_ , 4 ) + 4
SCREAMING_SNAKE_CASE : int = self.head
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Any = node.forward.copy()
lines.append(f"[{node.key}]".ljust(lowerCamelCase_ , '''-''' ) + '''* ''' * len(lowerCamelCase_ ) )
lines.append(''' ''' * label_size + '''| ''' * len(lowerCamelCase_ ) )
while len(node.forward ) != 0:
SCREAMING_SNAKE_CASE : List[Any] = node.forward[0]
lines.append(
f"[{node.key}]".ljust(lowerCamelCase_ , '''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = node.forward
lines.append('''None'''.ljust(lowerCamelCase_ ) + '''* ''' * len(lowerCamelCase_ ) )
return f"SkipList(level={self.level})\n" + "\n".join(lowerCamelCase_ )
def __iter__( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
SCREAMING_SNAKE_CASE : Dict = node.forward[0]
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def __lowerCAmelCase ( self :int , lowerCamelCase_ :str ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : str = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
SCREAMING_SNAKE_CASE : int = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowerCamelCase_ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __lowerCAmelCase ( self :int , lowerCamelCase_ :KT ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self._locate_node(lowerCamelCase_ )
if node is not None:
for i, update_node in enumerate(lowerCamelCase_ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
SCREAMING_SNAKE_CASE : Optional[Any] = node.forward[i]
else:
SCREAMING_SNAKE_CASE : Optional[int] = update_node.forward[:i]
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :KT , lowerCamelCase_ :VT ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self._locate_node(lowerCamelCase_ )
if node is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = value
else:
SCREAMING_SNAKE_CASE : List[str] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , lowerCamelCase_ ):
update_vector.append(self.head )
SCREAMING_SNAKE_CASE : Any = level
SCREAMING_SNAKE_CASE : List[Any] = Node(lowerCamelCase_ , lowerCamelCase_ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = new_node
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :VT ) -> VT | None:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self._locate_node(lowerCamelCase_ )
if node is not None:
return node.value
return None
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
SCREAMING_SNAKE_CASE : Optional[int] = skip_list.head
SCREAMING_SNAKE_CASE : Tuple = {}
while node.level != 0:
SCREAMING_SNAKE_CASE : List[Any] = node.forward[0]
SCREAMING_SNAKE_CASE : Any = node.value
assert len(a_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __A ( )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
SCREAMING_SNAKE_CASE : Optional[Any] = skip_list.head
SCREAMING_SNAKE_CASE : Dict = {}
while node.level != 0:
SCREAMING_SNAKE_CASE : List[Any] = node.forward[0]
SCREAMING_SNAKE_CASE : int = node.value
if len(a_ ) != 4:
print()
assert len(a_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __A ( )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SkipList()
assert skip_list.find('''Some key''' ) is None
def __A ( )-> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def __A ( )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def __A ( )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def __A ( )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 1_42 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(a_ : Optional[int] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(a_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __A ( )-> Optional[Any]:
'''simple docstring'''
def is_sorted(a_ : Optional[Any] ):
return all(next_item >= item for item, next_item in zip(a_ , lst[1:] ) )
SCREAMING_SNAKE_CASE : str = SkipList()
for i in range(10 ):
skip_list.insert(a_ , a_ )
assert is_sorted(list(a_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(a_ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(a_ ) )
def __A ( )-> Union[str, Any]:
'''simple docstring'''
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 698 |
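As a quick illustration of the interface above, a small usage sketch follows (not part of the dataset row; it assumes the Node/SkipList definitions are saved as a hypothetical module skip_list.py):

# Usage sketch for the SkipList above.
from skip_list import SkipList

sl = SkipList()
for key, value in [("b", 2), ("a", 1), ("c", 3)]:
    sl.insert(key, value)

assert sl.find("a") == 1            # lookup is expected O(log n) on average
assert list(sl) == ["a", "b", "c"]  # iteration yields keys in sorted order
sl.delete("b")
assert sl.find("b") is None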
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowerCamelCase__ : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __A ( a_ : Optional[int] , a_ : str , a_ : str , a_ : str , a_ : List[str] )-> Tuple:
'''simple docstring'''
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE : Any = getattr(a_ , a_ )
if weight_type is not None:
SCREAMING_SNAKE_CASE : Optional[int] = getattr(a_ , a_ ).shape
else:
SCREAMING_SNAKE_CASE : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Optional[int] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __A ( a_ : Optional[Any] , a_ : Dict )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Optional[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = hf_model.feature_extractor
SCREAMING_SNAKE_CASE : Tuple = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : int = False
if "conv_layers" in name:
load_conv_layer(
a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE : List[str] = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Dict = name.split(a_ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE : Optional[int] = mapped_key.replace('''*''' , a_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE : List[str] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = '''bias'''
elif "weight" in name:
SCREAMING_SNAKE_CASE : Tuple = '''weight'''
else:
SCREAMING_SNAKE_CASE : str = None
set_recursively(a_ , a_ , a_ , a_ , a_ )
continue
if not is_used:
unused_weights.append(a_ )
logger.warning(F"Unused weights: {unused_weights}" )
def __A ( a_ : Dict , a_ : int , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE : List[str] = name.split('''.''' )
SCREAMING_SNAKE_CASE : Dict = int(items[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[int] , a_ : Optional[int] , a_ : Any , a_ : Any )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split('''adaptor.''' )[-1]
SCREAMING_SNAKE_CASE : List[Any] = name.split('''.''' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
else:
SCREAMING_SNAKE_CASE : str = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : int = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a_ , a_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[Any] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = emb.weight.shape
SCREAMING_SNAKE_CASE : Any = nn.Linear(a_ , a_ , bias=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = emb.weight.data
return lin_layer
@torch.no_grad()
def __A ( a_ : Tuple , a_ : Optional[int] , a_ : List[Any] , a_ : Any , a_ : Tuple , a_ : int , a_ : Any , a_ : str , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] , )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = WavaVecaConfig.from_pretrained(
a_ , add_adapter=a_ , adapter_stride=a_ , adapter_kernel_size=a_ , use_auth_token=a_ , output_hidden_size=a_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = MBartConfig.from_pretrained(a_ )
# load model
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
SCREAMING_SNAKE_CASE : int = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(a_ , use_auth_token=a_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE : str = WavaVecaModel(a_ )
recursively_load_weights_wavaveca(model.encoder , a_ )
# load decoder weights
SCREAMING_SNAKE_CASE : Dict = MBartForCausalLM(a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a_ )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechEncoderDecoderModel(encoder=a_ , decoder=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer(a_ )
tokenizer.save_pretrained(a_ )
SCREAMING_SNAKE_CASE : Tuple = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE : Any = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : List[str] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = '''mbart50'''
SCREAMING_SNAKE_CASE : Optional[int] = '''wav2vec2'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : List[str] = 25_00_04
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Any = SpeechEncoderDecoderConfig.from_dict(a_ )
hf_wavavec.save_pretrained(a_ )
feature_extractor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
lowerCamelCase__ : Dict = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 698 | 1 |
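The conversion script above hinges on one small trick: resolving a dotted parameter name against a module tree with repeated getattr calls, exactly as set_recursively does. A minimal self-contained sketch of that pattern (the toy model below exists only for this demo):

import torch
from torch import nn

def resolve_pointer(root: nn.Module, dotted_key: str):
    """Walk a dotted path like 'encoder.layers.0.attention.k_proj' via getattr."""
    pointer = root
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer

# hypothetical one-layer model used only for this demo
model = nn.Sequential()
model.add_module("proj", nn.Linear(4, 4))
weight = resolve_pointer(model, "proj.weight")
weight.data = torch.zeros_like(weight.data)  # same pattern the script uses to copy fairseq tensors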
"""simple docstring"""
lowerCamelCase__ : Tuple = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCamelCase__ : List[Any] = ["a", "b", "c", "d", "e"]
def __A ( a_ : Dict , a_ : Any , a_ : List[Any] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = start
# add current to visited
visited.append(a_ )
SCREAMING_SNAKE_CASE : Dict = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
SCREAMING_SNAKE_CASE : Optional[int] = topological_sort(a_ , a_ , a_ )
# if all neighbors visited add current to sort
sort.append(a_ )
# if all vertices haven't been visited select a new one to visit
if len(a_ ) != len(a_ ):
for vertice in vertices:
if vertice not in visited:
SCREAMING_SNAKE_CASE : Optional[int] = topological_sort(a_ , a_ , a_ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase__ : Any = topological_sort("a", [], [])
print(sort)
| 698 |
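Because this variant appends a vertex only after all of its descendants, it returns a reverse topological order (children before parents). A short property check, assuming the edges/vertices and topological_sort defined above:

# Property check: every destination of an edge appears before its source.
order = topological_sort("a", [], [])
position = {vertex: i for i, vertex in enumerate(order)}
for source, destinations in edges.items():
    for destination in destinations:
        assert position[destination] < position[source]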
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 698 | 1 |
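The sample above is the standard transformers lazy-import stub: heavy submodules are only imported when one of their attributes is first accessed. The same mechanism can be reproduced in a few lines with PEP 562's module-level __getattr__; a minimal sketch, independent of transformers' _LazyModule (the package and module names are hypothetical):

# mypackage/__init__.py -- minimal lazy-import sketch
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # PEP 562: called only when `name` is not found normally
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")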
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class lowercase__:
'''simple docstring'''
def __init__( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {}
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
def __lowerCAmelCase ( self :int , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :float ) -> None:
'''simple docstring'''
if nodea not in self.connections:
self.add_node(lowerCamelCase_ )
if nodea not in self.connections:
self.add_node(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = probability
def __lowerCAmelCase ( self :Dict ) -> list[str]:
'''simple docstring'''
return list(self.connections )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __A ( a_ : str , a_ : list[tuple[str, str, float]] , a_ : int )-> dict[str, int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = Counter(graph.get_nodes() )
SCREAMING_SNAKE_CASE : List[Any] = start
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : Tuple = graph.transition(a_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 698 |
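A short simulation sketch using the get_transitions helper above; the two-state weather chain is made up for the example (its stationary distribution is 5/7 sunny, 2/7 rainy, so the counts should land near 71%/29%):

# Hypothetical two-state chain: each transition is (from, to, probability).
transitions = [
    ("sunny", "sunny", 0.8), ("sunny", "rainy", 0.2),
    ("rainy", "sunny", 0.5), ("rainy", "rainy", 0.5),
]
counts = get_transitions("sunny", transitions, steps=10_000)
print(counts.most_common())  # empirically ~71% sunny, ~29% rainy in the long run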
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 698 | 1 |
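The metric's own docstring shows the intended entry point; restated as a runnable sketch (note that loading the metric downloads a GPT-2 featurizer, and the toy two-string inputs are illustrative only):

import datasets

mauve = datasets.load_metric("mauve")
predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]
out = mauve.compute(predictions=predictions, references=references)
print(out.mauve)  # 1.0 for identical distributions, per the docstring example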
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
SCREAMING_SNAKE_CASE : List[str] = '''xvjiarui/stable-diffusion-2-inpainting'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = '''Face of a yellow cat, high resolution, sitting on a park bench'''
SCREAMING_SNAKE_CASE : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Tuple = 50
SCREAMING_SNAKE_CASE : Tuple = jax.device_count()
SCREAMING_SNAKE_CASE : List[Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : Optional[int] = num_samples * [init_image]
SCREAMING_SNAKE_CASE : Dict = num_samples * [mask_image]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = pipeline.prepare_inputs(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# shard inputs and rng
SCREAMING_SNAKE_CASE : Union[str, Any] = replicate(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(lowerCamelCase_ , jax.device_count() )
SCREAMING_SNAKE_CASE : Tuple = shard(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = shard(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pipeline(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images.reshape(lowerCamelCase_ , 5_12 , 5_12 , 3 )
SCREAMING_SNAKE_CASE : Optional[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 698 |
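The test's shard/replicate calls implement the usual single-host data-parallel pattern in JAX: replicate copies the parameters to every local device, while shard splits the leading batch axis across them. A minimal sketch of what shard does to an array (shapes only, no model involved):

import jax
import numpy as np
from flax.training.common_utils import shard

batch = np.zeros((jax.device_count() * 2, 77))  # batch size divisible by device count
sharded = shard(batch)
# shard reshapes (B, ...) -> (num_devices, B // num_devices, ...)
assert sharded.shape == (jax.device_count(), 2, 77)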
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 698 | 1 |
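The heart of decode_best_spans is the greedy overlap filter in _get_best_spans. The same logic can be exercised in isolation on plain lists (toy logits, no tokenizer needed); the helper name below is made up for the demo:

def best_non_overlapping_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        # skip any span that contains, or is contained in, an already chosen span
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_non_overlapping_spans([0.1, 2.0, 0.3], [0.2, 1.5, 0.9]))  # [(1, 1), (2, 2)]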
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """markuplm"""
def __init__( self :int , lowerCamelCase_ :List[str]=3_05_22 , lowerCamelCase_ :Union[str, Any]=7_68 , lowerCamelCase_ :str=12 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :str=30_72 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Any=1E-12 , lowerCamelCase_ :Dict=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :str=2_56 , lowerCamelCase_ :List[Any]=10_24 , lowerCamelCase_ :Union[str, Any]=2_16 , lowerCamelCase_ :Dict=10_01 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :str=50 , lowerCamelCase_ :List[str]="absolute" , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :int=None , **lowerCamelCase_ :Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : Optional[Any] = max_depth
SCREAMING_SNAKE_CASE : Dict = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : Tuple = tag_pad_id
SCREAMING_SNAKE_CASE : str = subs_pad_id
SCREAMING_SNAKE_CASE : List[Any] = xpath_unit_hidden_size
| 698 | 1 |
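PretrainedConfig subclasses like the one above are plain serializable containers; a short sketch of the usual round trip (to_dict and from_dict are part of the transformers PretrainedConfig API):

from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=64)          # override one of the XPath tree-depth limits
as_dict = config.to_dict()                     # plain JSON-compatible dict
restored = MarkupLMConfig.from_dict(as_dict)   # round-trips without loss
assert restored.max_depth == 64 and restored.xpath_unit_hidden_size == 32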
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __lowerCAmelCase ( cls :Optional[int] ) -> List[str]:
'''simple docstring'''
return cls()
@dataclass
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
return True
@register_to_config
def __init__( self :List[str] , lowerCamelCase_ :float = 0.0_2 , lowerCamelCase_ :float = 1_00 , lowerCamelCase_ :float = 1.0_0_7 , lowerCamelCase_ :float = 80 , lowerCamelCase_ :float = 0.0_5 , lowerCamelCase_ :float = 50 , ) -> int:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return KarrasVeSchedulerState.create()
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :KarrasVeSchedulerState , lowerCamelCase_ :int , lowerCamelCase_ :Tuple = () ) -> KarrasVeSchedulerState:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = jnp.arange(0 , lowerCamelCase_ )[::-1].copy()
SCREAMING_SNAKE_CASE : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCamelCase_ , schedule=jnp.array(lowerCamelCase_ , dtype=jnp.floataa ) , timesteps=lowerCamelCase_ , )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :KarrasVeSchedulerState , lowerCamelCase_ :jnp.ndarray , lowerCamelCase_ :float , lowerCamelCase_ :random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
SCREAMING_SNAKE_CASE : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
SCREAMING_SNAKE_CASE : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
SCREAMING_SNAKE_CASE : str = random.split(lowerCamelCase_ , num=1 )
SCREAMING_SNAKE_CASE : Any = self.config.s_noise * random.normal(key=lowerCamelCase_ , shape=sample.shape )
SCREAMING_SNAKE_CASE : str = sigma + gamma * sigma
SCREAMING_SNAKE_CASE : List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :KarrasVeSchedulerState , lowerCamelCase_ :jnp.ndarray , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :jnp.ndarray , lowerCamelCase_ :bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = sample_hat + sigma_hat * model_output
SCREAMING_SNAKE_CASE : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat
SCREAMING_SNAKE_CASE : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCamelCase_ , derivative=lowerCamelCase_ , state=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :KarrasVeSchedulerState , lowerCamelCase_ :jnp.ndarray , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :jnp.ndarray , lowerCamelCase_ :jnp.ndarray , lowerCamelCase_ :jnp.ndarray , lowerCamelCase_ :bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = sample_prev + sigma_prev * model_output
SCREAMING_SNAKE_CASE : Dict = (sample_prev - pred_original_sample) / sigma_prev
SCREAMING_SNAKE_CASE : Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCamelCase_ , derivative=lowerCamelCase_ , state=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :KarrasVeSchedulerState , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError()
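# --- Hedged illustration, not part of the original sample ---
# Recomputes the sigma schedule built in `set_timesteps` above with plain
# numpy, using the defaults from the __init__ signature (sigma_min=0.02,
# sigma_max=100). The schedule is a geometric interpolation between
# sigma_min**2 and sigma_max**2; the sampler indexes it by timestep value,
# so larger timesteps map to larger noise levels.
import numpy as np
num_inference_steps = 5
sigma_min, sigma_max = 0.02, 100.0
timesteps = np.arange(0, num_inference_steps)[::-1]
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
    for i in timesteps
]
print(schedule)  # [0.0004, ..., 10000.0]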
| 698 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """resnet"""
UpperCamelCase = ["""basic""", """bottleneck"""]
def __init__( self :Optional[int] , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :Tuple=64 , lowerCamelCase_ :Union[str, Any]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase_ :int=[3, 4, 6, 3] , lowerCamelCase_ :Any="bottleneck" , lowerCamelCase_ :Optional[int]="relu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=None , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : List[Any] = layer_type
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = downsample_in_first_stage
SCREAMING_SNAKE_CASE : int = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :str ) -> float:
'''simple docstring'''
return 1E-3
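# --- Hedged usage sketch, not part of the original sample ---
# The block above mirrors transformers' ResNetConfig / ResNetOnnxConfig (an
# assumption based on the microsoft/resnet-50 URL); the import below is the
# upstream class, not the obfuscated one defined here.
from transformers import ResNetConfig
config = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']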
| 698 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = IFInpaintingSuperResolutionPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
UpperCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :int=0 ) -> Optional[Any]:
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
self._test_save_load_local()
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mra"""
def __init__( self :int , lowerCamelCase_ :Optional[int]=5_02_65 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :List[Any]="absolute" , lowerCamelCase_ :str=4 , lowerCamelCase_ :List[str]="full" , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :List[Any]=2 , **lowerCamelCase_ :str , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = block_per_row
SCREAMING_SNAKE_CASE : Optional[int] = approx_mode
SCREAMING_SNAKE_CASE : List[Any] = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE : Union[str, Any] = initial_prior_diagonal_n_blocks
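# --- Hedged usage sketch, not part of the original sample ---
# The block above appears to mirror transformers' MraConfig (based on the
# uw-madison/mra-base-512-4 URL); the import is the upstream class, an
# assumption about this sample's origin.
from transformers import MraConfig
config = MraConfig(block_per_row=4, approx_mode="full")
print(config.hidden_size, config.num_hidden_layers)  # 768 12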
| 698 | 1 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __A ( a_ : Optional[Any] , a_ : int , a_ : Tuple=0 )-> Any:
'''simple docstring'''
if name is None:
SCREAMING_SNAKE_CASE : Optional[int] = None
else:
SCREAMING_SNAKE_CASE : Dict = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}'''
SCREAMING_SNAKE_CASE : List[Any] = fmt.format(a_ )
# Print and recurse (if needed).
    if isinstance(a_ , dict ):
if msg is not None:
print(a_ )
for k in val.keys():
recursive_print(a_ , val[k] , spaces + 2 )
elif isinstance(a_ , torch.Tensor ):
print(a_ , ''':''' , val.size() )
else:
print(a_ , ''':''' , a_ )
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : List[Any] , a_ : Dict , a_ : Tuple )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
SCREAMING_SNAKE_CASE : Dict = (num_heads, hidden_size, num_splits) + input_shape[1:]
SCREAMING_SNAKE_CASE : Tuple = param.view(*a_ )
SCREAMING_SNAKE_CASE : str = param.transpose(0 , 2 )
SCREAMING_SNAKE_CASE : List[Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
SCREAMING_SNAKE_CASE : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:]
SCREAMING_SNAKE_CASE : Any = param.view(*a_ )
SCREAMING_SNAKE_CASE : Tuple = param.transpose(0 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = param.view(*a_ )
return param
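# --- Hedged shape check, not part of the original sample ---
# Demonstrates the checkpoint_version >= 2.0 branch above: the fused QKV rows
# are viewed head-major as (num_heads, num_splits, head_dim) and regrouped
# split-major as (num_splits, num_heads, head_dim), making the Q, K and V
# blocks contiguous while the trailing dimension is untouched.
import torch
num_heads, num_splits, head_dim = 2, 3, 4
param = torch.arange(num_heads * num_splits * head_dim * 8, dtype=torch.float32).view(24, 8)
input_shape = param.size()
saved_shape = (num_heads, num_splits, head_dim) + input_shape[1:]
out = param.view(*saved_shape).transpose(0, 1).contiguous().view(*input_shape)
print(input_shape, out.shape)  # torch.Size([24, 8]) twice, rows reordered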
def __A ( a_ : Any , a_ : Union[str, Any] , a_ : Optional[int] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE : Any = input_state_dict.get('''args''' , a_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE : Dict = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE : List[str] = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = ds_args.hidden_size
SCREAMING_SNAKE_CASE : List[Any] = ds_args.num_layers
SCREAMING_SNAKE_CASE : Any = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE : Union[str, Any] = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE : Dict = input_state_dict['''checkpoint_version''']
else:
SCREAMING_SNAKE_CASE : List[Any] = 0.0
# The model.
SCREAMING_SNAKE_CASE : List[str] = input_state_dict['''model''']
# The language model.
SCREAMING_SNAKE_CASE : Optional[int] = model['''language_model''']
# The embeddings.
SCREAMING_SNAKE_CASE : Dict = lm['''embedding''']
# The word embeddings.
SCREAMING_SNAKE_CASE : Any = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE : List[str] = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE : Optional[int] = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE : Optional[Any] = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE : int = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
SCREAMING_SNAKE_CASE : List[str] = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE : List[Any] = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
SCREAMING_SNAKE_CASE : Dict = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE : Optional[int] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE : Any = layer_re.match(a_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE : Dict = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE : Tuple = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE : Tuple = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE : Tuple = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
SCREAMING_SNAKE_CASE : Any = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
SCREAMING_SNAKE_CASE : List[str] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , a_ , a_ )
SCREAMING_SNAKE_CASE : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE : Dict = torch.tensor(-1E4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = masked_bias
SCREAMING_SNAKE_CASE : Dict = fix_query_key_value_ordering(a_ , a_ , 3 , a_ , a_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE : Optional[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE : Dict = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE : List[str] = fix_query_key_value_ordering(a_ , a_ , 3 , a_ , a_ )
# Store. No change of shape.
SCREAMING_SNAKE_CASE : Dict = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE : str = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE : List[Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE : Any = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE : Optional[int] = transformer['''final_layernorm.weight''']
SCREAMING_SNAKE_CASE : List[Any] = transformer['''final_layernorm.bias''']
    # For the LM head, transformers expects the matrix to be tied to the word embeddings.
SCREAMING_SNAKE_CASE : int = word_embeddings
# It should be done!
return output_state_dict
def __A ( )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''' , type=a_ , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=a_ , help='''An optional config json file describing the pre-trained model.''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
# Extract the basename.
SCREAMING_SNAKE_CASE : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(a_ , map_location='''cpu''' )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.load(args.path_to_checkpoint , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : Tuple = input_state_dict.get('''args''' , a_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
SCREAMING_SNAKE_CASE : int = '''gelu_fast'''
elif ds_args.openai_gelu:
SCREAMING_SNAKE_CASE : int = '''gelu_new'''
else:
SCREAMING_SNAKE_CASE : Optional[Any] = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
SCREAMING_SNAKE_CASE : Any = '''gelu_new'''
# Spell out all parameters in case the defaults change.
SCREAMING_SNAKE_CASE : Dict = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=a_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=a_ , summary_activation=a_ , summary_proj_to_labels=a_ , summary_first_dropout=0.1 , scale_attn_weights=a_ , use_cache=a_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
SCREAMING_SNAKE_CASE : Dict = GPTaConfig.from_json_file(args.config_file )
SCREAMING_SNAKE_CASE : Optional[int] = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
SCREAMING_SNAKE_CASE : Optional[Any] = convert_megatron_checkpoint(a_ , a_ , a_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(a_ , a_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
SCREAMING_SNAKE_CASE : List[str] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
SCREAMING_SNAKE_CASE : Any = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
SCREAMING_SNAKE_CASE : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
SCREAMING_SNAKE_CASE : str = '''gpt2'''
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = type(a_ ).__name__
SCREAMING_SNAKE_CASE : Tuple = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(a_ )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(a_ )
# Store the state_dict to file.
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(a_ , '''pytorch_model.bin''' )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(a_ , a_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """nllb-moe"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[str] , lowerCamelCase_ :Optional[int]=12_81_12 , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :Any=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :Union[str, Any]=0.0_5 , lowerCamelCase_ :Optional[int]=0.0_5 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Tuple="relu" , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :Optional[Any]="float32" , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[Any]=1_28 , lowerCamelCase_ :Any=64 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Union[str, Any]=0.0_0_1 , lowerCamelCase_ :Optional[int]=0.0_0_1 , lowerCamelCase_ :List[str]="all" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=False , lowerCamelCase_ :Tuple=1.0 , lowerCamelCase_ :Union[str, Any]=0.2 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :int=2 , lowerCamelCase_ :List[str]=False , **lowerCamelCase_ :int , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : str = d_model
SCREAMING_SNAKE_CASE : Optional[int] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Any = activation_function
SCREAMING_SNAKE_CASE : Tuple = init_std
SCREAMING_SNAKE_CASE : str = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE : int = router_z_loss_coef
SCREAMING_SNAKE_CASE : Any = router_aux_loss_coef
SCREAMING_SNAKE_CASE : str = decoder_sparse_step
SCREAMING_SNAKE_CASE : str = encoder_sparse_step
SCREAMING_SNAKE_CASE : List[str] = num_experts
SCREAMING_SNAKE_CASE : Union[str, Any] = expert_capacity
SCREAMING_SNAKE_CASE : Tuple = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = router_dtype
SCREAMING_SNAKE_CASE : Union[str, Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : int = batch_prioritized_routing
SCREAMING_SNAKE_CASE : Optional[int] = second_expert_policy
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE : Any = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE : Optional[Any] = moe_token_dropout
SCREAMING_SNAKE_CASE : Tuple = output_router_logits
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
| 698 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ : int | float | str , a_ : int | float | str )-> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
SCREAMING_SNAKE_CASE : str = int(a_ )
SCREAMING_SNAKE_CASE : List[Any] = int(a_ )
SCREAMING_SNAKE_CASE : list[str] = []
for temp in range(int(a_ ) ):
series.append(F"1 / {pow(temp + 1 , int(a_ ) )}" if series else '''1''' )
return series
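# Hedged example, not in the original sample: with nth_term=5 and power=2 the
# function above returns ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].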
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : int = int(input("Enter the last number (nth term) of the P-Series"))
lowerCamelCase__ : Dict = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 698 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ : Optional[Any] = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ : Dict = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ : List[str] = "CompVis/stable-diffusion-v1-4"
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :AutoencoderKL , lowerCamelCase_ :CLIPTextModel , lowerCamelCase_ :CLIPTokenizer , lowerCamelCase_ :UNetaDConditionModel , lowerCamelCase_ :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ :StableDiffusionSafetyChecker , lowerCamelCase_ :CLIPImageProcessor , lowerCamelCase_ :bool = True , ) -> List[str]:
'''simple docstring'''
        super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , requires_safety_checker=lowerCamelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self :Dict ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , lowerCamelCase_ ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Union[str, int]] = "auto" ) -> Tuple:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[str] , ) -> Tuple:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Tuple , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Dict , ) -> List[str]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Optional[Any] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(lowerCamelCase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Tuple = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
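# --- Hedged usage sketch, comments only (running needs the four checkpoints) ---
# This sample appears to be a diffusers community comparison pipeline; the
# custom_pipeline identifier below is an assumption about its upstream name:
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe(prompt="an astronaut riding a horse").images  # one per v1.1-v1.4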
| 698 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """roc_bert"""
def __init__( self :Dict , lowerCamelCase_ :List[str]=3_05_22 , lowerCamelCase_ :str=7_68 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :List[Any]=30_72 , lowerCamelCase_ :Optional[Any]="gelu" , lowerCamelCase_ :Tuple=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Dict=0.0_2 , lowerCamelCase_ :Optional[Any]=1E-12 , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Union[str, Any]=0 , lowerCamelCase_ :str="absolute" , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=9_10 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :str=2_48_58 , lowerCamelCase_ :int=True , **lowerCamelCase_ :str , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = enable_pronunciation
SCREAMING_SNAKE_CASE : str = enable_shape
SCREAMING_SNAKE_CASE : Tuple = pronunciation_embed_dim
SCREAMING_SNAKE_CASE : int = pronunciation_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = shape_embed_dim
SCREAMING_SNAKE_CASE : Optional[Any] = shape_vocab_size
SCREAMING_SNAKE_CASE : Dict = concat_input
SCREAMING_SNAKE_CASE : Tuple = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
| 698 |
"""simple docstring"""
def __A ( a_ : list , a_ : int = 0 )-> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = length or len(a_ )
SCREAMING_SNAKE_CASE : List[Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = list_data[i + 1], list_data[i]
SCREAMING_SNAKE_CASE : Optional[Any] = True
return list_data if not swapped else bubble_sort(a_ , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
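# --- Hedged illustration, not part of the original sample ---
# The same recursive bubble sort as above, with readable names: each pass
# bubbles the largest remaining element to the end, and the recursion stops
# as soon as a full pass makes no swap.
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]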
| 698 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase__ : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[Any] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 698 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :ScoreSdeVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :int , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 20_00 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : List[str] = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : Any = self.unet
SCREAMING_SNAKE_CASE : Dict = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : List[str] = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
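# Hedged note, not part of the original sample: each iteration above runs
# `correct_steps` Langevin corrector updates (step_correct) followed by one
# reverse-SDE predictor update (step_pred), i.e. the predictor-corrector
# sampler from score-based SDE generative modeling.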
| 698 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ : str = 50000
lowerCamelCase__ : List[Any] = 5000
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = os.path.split(__file__)
lowerCamelCase__ : List[Any] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __A ( a_ : datasets.Dataset , a_ : Any )-> Tuple:
'''simple docstring'''
for i in range(a_ ):
SCREAMING_SNAKE_CASE : List[Any] = dataset[i]
@get_duration
def __A ( a_ : datasets.Dataset , a_ : str , a_ : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
for i in range(0 , len(a_ ) , a_ ):
SCREAMING_SNAKE_CASE : Tuple = dataset[i : i + batch_size]
@get_duration
def __A ( a_ : datasets.Dataset , a_ : Any , a_ : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
with dataset.formatted_as(type=a_ ):
for i in range(a_ ):
SCREAMING_SNAKE_CASE : str = dataset[i]
@get_duration
def __A ( a_ : datasets.Dataset , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
with dataset.formatted_as(type=a_ ):
for i in range(0 , a_ , a_ ):
SCREAMING_SNAKE_CASE : List[str] = dataset[i : i + batch_size]
def __A ( )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
SCREAMING_SNAKE_CASE : List[Any] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
SCREAMING_SNAKE_CASE : List[Any] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
SCREAMING_SNAKE_CASE : Optional[int] = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
SCREAMING_SNAKE_CASE : Dict = generate_example_dataset(
os.path.join(a_ , '''dataset.arrow''' ) , a_ , num_examples=a_ , seq_shapes={'''list''': (1_00,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(a_ ) )
SCREAMING_SNAKE_CASE : str = func(a_ , **a_ )
print('''shuffling dataset''' )
SCREAMING_SNAKE_CASE : Tuple = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(a_ ) )
SCREAMING_SNAKE_CASE : List[str] = func(
a_ , **a_ )
with open(a_ , '''wb''' ) as f:
f.write(json.dumps(a_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 698 |
"""simple docstring"""
import qiskit
def __A ( a_ : int , a_ : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE : str = qiskit.QuantumCircuit(a_ , a_ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE : int = qiskit.execute(a_ , a_ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(a_ )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
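# Hedged note, not part of the original sample: the circuit applies X to both
# qubits before measuring, so the run is deterministic and all 1000 shots
# should land on the '11' state, e.g. {'11': 1000}.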
| 698 | 1 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :NestedDataStructureLike[PathLike] , lowerCamelCase_ :Optional[NamedSplit] = None , lowerCamelCase_ :Optional[Features] = None , lowerCamelCase_ :str = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[int] = None , **lowerCamelCase_ :Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = field
        SCREAMING_SNAKE_CASE : Union[str, Any] = path_or_paths if isinstance(lowerCamelCase_ , dict ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[Any] = Json(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , field=lowerCamelCase_ , **lowerCamelCase_ , )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Any = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : List[str] = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class lowercase__:
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Dataset , lowerCamelCase_ :Union[PathLike, BinaryIO] , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[int] = None , **lowerCamelCase_ :Optional[Any] , ) -> List[str]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0." )
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : List[Any] = path_or_buf
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE : str = num_proc
SCREAMING_SNAKE_CASE : Dict = '''utf-8'''
SCREAMING_SNAKE_CASE : Union[str, Any] = to_json_kwargs
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.to_json_kwargs.pop('''path_or_buf''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.to_json_kwargs.pop('''orient''' , '''records''' )
SCREAMING_SNAKE_CASE : List[str] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
SCREAMING_SNAKE_CASE : Optional[Any] = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
SCREAMING_SNAKE_CASE : str = self.to_json_kwargs.pop('''compression''' , lowerCamelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"`datasets` currently does not support {compression} compression" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=lowerCamelCase_ ) as buffer:
SCREAMING_SNAKE_CASE : int = self._write(file_obj=lowerCamelCase_ , orient=lowerCamelCase_ , lines=lowerCamelCase_ , index=lowerCamelCase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
''' was passed. Please provide a local path instead.''' )
SCREAMING_SNAKE_CASE : List[str] = self._write(
file_obj=self.path_or_buf , orient=lowerCamelCase_ , lines=lowerCamelCase_ , index=lowerCamelCase_ , **self.to_json_kwargs )
return written
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = args
SCREAMING_SNAKE_CASE : Dict = query_table(
table=self.dataset.data , key=slice(lowerCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE : Tuple = batch.to_pandas().to_json(
path_or_buf=lowerCamelCase_ , orient=lowerCamelCase_ , lines=lowerCamelCase_ , index=lowerCamelCase_ , **lowerCamelCase_ )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :BinaryIO , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple , **lowerCamelCase_ :Union[str, Any] , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
SCREAMING_SNAKE_CASE : Tuple = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCamelCase_ , lowerCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(lowerCamelCase_ )
return written
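# --- Hedged usage sketch, not part of the original sample ---
# The writer above appears to back `datasets.Dataset.to_json`; the call below
# uses the public API rather than the obfuscated classes defined here.
from datasets import Dataset
ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_json("out.jsonl", lines=True)  # JSON Lines: one record per line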
| 698 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ : Optional[int] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __A ( a_ : Dict )-> str:
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __A ( a_ : Dict )-> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(a_ )
def __A ( a_ : Union[str, Any] )-> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE : List[str] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(a_ , id=a_ )
def __A ( a_ : Dict , a_ : List[str] )-> Dict:
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE : List[str] = 0
# Doctest custom flag to ignore output.
lowerCamelCase__ : Tuple = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase__ : Optional[int] = doctest.OutputChecker
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] ) -> Dict:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : str = CustomOutputChecker
lowerCamelCase__ : Any = HfDoctestModule
lowerCamelCase__ : int = HfDocTestParser
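
# A compact illustration of the IGNORE_RESULT mechanism wired up above: a
# doctest marked with the flag passes even though its expected output is
# wrong, because the checker short-circuits the comparison. The flag name
# and helper class below are local to this sketch.
DEMO_FLAG = doctest.register_optionflag("IGNORE_RESULT_DEMO")

class DemoChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if DEMO_FLAG & optionflags:
            return True
        return doctest.OutputChecker.check_output(self, want, got, optionflags)

demo_src = ">>> 1 + 1  # doctest: +IGNORE_RESULT_DEMO\n7\n"
demo_test = doctest.DocTestParser().get_doctest(demo_src, {}, "demo", None, 0)
demo_runner = doctest.DocTestRunner(checker=DemoChecker())
demo_runner.run(demo_test)
assert demo_runner.failures == 0  # the bogus expected value ``7`` is ignored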
| 698 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :str ) -> Tuple:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
SCREAMING_SNAKE_CASE : List[str] = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase_ , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = '''sgugger/tiny-distilbert-classification'''
SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , only_pretrain_model=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmark(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowerCamelCase_ , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(lowerCamelCase_ , [config] )
SCREAMING_SNAKE_CASE : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = TensorFlowBenchmark(lowerCamelCase_ , [config] )
SCREAMING_SNAKE_CASE : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmark(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(lowerCamelCase_ , [config] )
SCREAMING_SNAKE_CASE : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = TensorFlowBenchmark(lowerCamelCase_ , configs=[config] )
SCREAMING_SNAKE_CASE : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=lowerCamelCase_ , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Any = TensorFlowBenchmark(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase_ , save_to_csv=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(lowerCamelCase_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(lowerCamelCase_ , '''env.csv''' ) , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(lowerCamelCase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , '''env.csv''' ) ).exists() )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(lowerCamelCase_ :Dict ):
self.assertTrue(hasattr(lowerCamelCase_ , '''sequential''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''cumulative''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''current''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase_ , '''log.txt''' ) , log_print=lowerCamelCase_ , trace_memory_line_by_line=lowerCamelCase_ , eager_mode=lowerCamelCase_ , multi_process=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmark(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , '''log.txt''' ) ).exists() )
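
# The same benchmark API the tests above exercise, driven directly; a sketch
# that assumes TensorFlow and the tiny checkpoint are available.
def run_tiny_benchmark():
    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(args).run()
    # nested dicts keyed by model, batch size, then sequence length (see
    # check_results_dict_not_empty above)
    print(results.time_inference_result)
    print(results.memory_inference_result)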
| 698 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=13 , lowerCamelCase_ :List[str]=7 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :str=99 , lowerCamelCase_ :Optional[Any]=32 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :int=4 , lowerCamelCase_ :Optional[Any]=37 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :int=3 , lowerCamelCase_ :List[Any]=4 , lowerCamelCase_ :Optional[Any]=None , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : str = 13
SCREAMING_SNAKE_CASE : str = 7
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Any = 99
SCREAMING_SNAKE_CASE : Dict = 3_84
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Any = 37
SCREAMING_SNAKE_CASE : List[str] = '''gelu'''
SCREAMING_SNAKE_CASE : List[str] = 0.1
SCREAMING_SNAKE_CASE : int = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = 5_12
SCREAMING_SNAKE_CASE : int = 16
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Tuple = 0.0_2
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
SCREAMING_SNAKE_CASE : str = 1_28
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = 9
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : List[str] = None
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[str] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Dict = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertForMaskedLM(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = TFConvBertForSequenceClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertForMultipleChoice(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForTokenClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFConvBertForQuestionAnswering(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = True
if hasattr(lowerCamelCase_ , '''use_cache''' ):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : str = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , '''saved_model''' , '''1''' )
SCREAMING_SNAKE_CASE : Tuple = tf.keras.models.load_model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[int] = outputs['''encoder_hidden_states''']
SCREAMING_SNAKE_CASE : str = outputs['''encoder_attentions''']
else:
SCREAMING_SNAKE_CASE : List[str] = outputs['''hidden_states''']
SCREAMING_SNAKE_CASE : List[Any] = outputs['''attentions''']
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
def check_decoder_attentions_output(lowerCamelCase_ :Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
SCREAMING_SNAKE_CASE : int = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase_ :Optional[int] ):
SCREAMING_SNAKE_CASE : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
SCREAMING_SNAKE_CASE : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
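
# The multiple-choice duplication used in create_and_check_for_multiple_choice,
# in isolation: a (batch, seq) tensor is expanded and tiled to
# (batch, num_choices, seq) so every answer choice sees the same prompt.
import tensorflow as tf

batch_size, seq_length, num_choices = 2, 5, 4
demo_ids = tf.ones((batch_size, seq_length), dtype=tf.int32)
tiled = tf.tile(tf.expand_dims(demo_ids, 1), (1, num_choices, 1))
assert tiled.shape == (batch_size, num_choices, seq_length)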
| 698 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Dict = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
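
# The idea behind _LazyModule, in miniature: the module object records which
# submodule defines each public symbol and only imports that submodule when
# the attribute is first touched, keeping `import package` cheap. This is a
# simplified stand-in for illustration, not transformers' actual
# implementation, and it assumes the lazy module lives in a real package.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)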
| 698 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """bert"""
def __init__( self :Any , lowerCamelCase_ :List[Any]=3_05_22 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :Tuple=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :int=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :int="absolute" , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=None , **lowerCamelCase_ :List[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
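
# The configuration above in use; assumes transformers is installed (its
# public names there are BertConfig and, for the ONNX class, BertOnnxConfig).
from transformers import BertConfig

demo_config = BertConfig(vocab_size=3_05_22, hidden_size=7_68)
print(demo_config.num_hidden_layers)  # 12, the default wired into __init__
# For ONNX export, the `inputs` property above marks batch and sequence as
# dynamic axes, e.g. {"input_ids": {0: "batch", 1: "sequence"}, ...}, with
# an extra "choice" axis for the multiple-choice task.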
| 698 | 1 |
"""simple docstring"""
def __A ( a_ : int )-> bool:
'''simple docstring'''
    # round the float cube root before cubing; otherwise representation error
    # makes e.g. 27 ** (1 / 3) cube back to 27.000000000000004 instead of 27
    SCREAMING_SNAKE_CASE : List[str] = round(n ** (1 / 3))
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
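
# An integer-exact alternative that sidesteps floating point entirely by
# binary-searching for the cube root, so arbitrarily large inputs also work.
def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        return perfect_cube_exact(-n)  # (-k) ** 3 == -(k ** 3)
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False

assert perfect_cube_exact(27) and not perfect_cube_exact(4)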
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :List[Any] , lowerCamelCase_ :Optional[int]=5_02_67 , lowerCamelCase_ :List[Any]=50_00_00 , lowerCamelCase_ :str=7_68 , lowerCamelCase_ :Optional[Any]=2_56 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :Any=30_72 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int=None , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :str=0 , lowerCamelCase_ :int=2 , **lowerCamelCase_ :List[str] , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = entity_vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Dict = entity_emb_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : str = classifier_dropout
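
# The two LUKE-specific knobs above, read off the real config class
# (assumes transformers is installed; the defaults match the signature above):
from transformers import LukeConfig

demo_cfg = LukeConfig()
print(demo_cfg.entity_vocab_size, demo_cfg.entity_emb_size)  # 500000 256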
| 698 | 1 |
"""simple docstring"""
from __future__ import annotations
class lowercase__:
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCamelCase_ :int = 0 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = key
def __lowerCAmelCase ( self :int , lowerCamelCase_ :str , lowerCamelCase_ :int ) -> list[str]:
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase_ ) ^ key ) for ch in content]
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :int ) -> list[str]:
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase_ ) ^ key ) for ch in content]
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :int = 0 ) -> str:
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = key or self.__key or 1
        # reduce an arbitrarily large key into range (equivalent to key %= 2_55)
while key > 2_55:
key -= 2_55
# This will be returned
SCREAMING_SNAKE_CASE : int = ''''''
for ch in content:
ans += chr(ord(lowerCamelCase_ ) ^ key )
return ans
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :int = 0 ) -> str:
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = key or self.__key or 1
        # reduce an arbitrarily large key into range (equivalent to key %= 2_55)
while key > 2_55:
key -= 2_55
# This will be returned
SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
for ch in content:
ans += chr(ord(lowerCamelCase_ ) ^ key )
return ans
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :int = 0 ) -> bool:
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
try:
with open(lowerCamelCase_ ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase_ , lowerCamelCase_ ) )
except OSError:
return False
return True
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :str , lowerCamelCase_ :int ) -> bool:
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
try:
with open(lowerCamelCase_ ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase_ , lowerCamelCase_ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 698 |
"""simple docstring"""
# Uses DFS to find an Eulerian path/circuit traversal.
def __A ( a_ : Dict , a_ : int , a_ : str , a_ : Optional[Any]=None )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = True, True
SCREAMING_SNAKE_CASE : List[str] = dfs(a_ , a_ , a_ , a_ )
return path
def __A ( a_ : List[str] , a_ : Any )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = -1
for i in range(a_ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
SCREAMING_SNAKE_CASE : Tuple = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __A ( a_ : Any , a_ : int )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = check_circuit_or_path(a_ , a_ )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
SCREAMING_SNAKE_CASE : Tuple = 1
if check == 2:
SCREAMING_SNAKE_CASE : Optional[int] = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
SCREAMING_SNAKE_CASE : Optional[int] = dfs(a_ , a_ , a_ )
print(a_ )
def __A ( )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
SCREAMING_SNAKE_CASE : str = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
SCREAMING_SNAKE_CASE : str = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
SCREAMING_SNAKE_CASE : int = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
SCREAMING_SNAKE_CASE : int = {
1: [],
2: []
# all degree is zero
}
SCREAMING_SNAKE_CASE : List[str] = 10
check_euler(a_ , a_ )
check_euler(a_ , a_ )
check_euler(a_ , a_ )
check_euler(a_ , a_ )
check_euler(a_ , a_ )
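
# The degree-parity rule that check_circuit_or_path encodes, stated directly:
# with all edges in one connected component, zero odd-degree vertices means
# an Euler circuit, exactly two mean an Euler path, anything else means neither.
def classify_euler(graph: dict) -> str:
    odd = [v for v, neighbors in graph.items() if len(neighbors) % 2 == 1]
    if len(odd) == 0:
        return "circuit"
    if len(odd) == 2:
        return "path"
    return "neither"

assert classify_euler({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}) == "path"
assert classify_euler({1: [2, 3], 2: [1, 3], 3: [1, 2]}) == "circuit"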
if __name__ == "__main__":
main()
| 698 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """informer"""
UpperCamelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self :Union[str, Any] , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :str = "student_t" , lowerCamelCase_ :str = "nll" , lowerCamelCase_ :int = 1 , lowerCamelCase_ :List[int] = None , lowerCamelCase_ :Optional[Union[str, bool]] = "mean" , lowerCamelCase_ :int = 0 , lowerCamelCase_ :int = 0 , lowerCamelCase_ :int = 0 , lowerCamelCase_ :int = 0 , lowerCamelCase_ :Optional[List[int]] = None , lowerCamelCase_ :Optional[List[int]] = None , lowerCamelCase_ :int = 64 , lowerCamelCase_ :int = 32 , lowerCamelCase_ :int = 32 , lowerCamelCase_ :int = 2 , lowerCamelCase_ :int = 2 , lowerCamelCase_ :int = 2 , lowerCamelCase_ :int = 2 , lowerCamelCase_ :bool = True , lowerCamelCase_ :str = "gelu" , lowerCamelCase_ :float = 0.0_5 , lowerCamelCase_ :float = 0.1 , lowerCamelCase_ :float = 0.1 , lowerCamelCase_ :float = 0.1 , lowerCamelCase_ :float = 0.1 , lowerCamelCase_ :int = 1_00 , lowerCamelCase_ :float = 0.0_2 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str = "prob" , lowerCamelCase_ :int = 5 , lowerCamelCase_ :bool = True , **lowerCamelCase_ :List[Any] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = prediction_length
SCREAMING_SNAKE_CASE : Optional[Any] = context_length or prediction_length
SCREAMING_SNAKE_CASE : str = distribution_output
SCREAMING_SNAKE_CASE : Any = loss
SCREAMING_SNAKE_CASE : str = input_size
SCREAMING_SNAKE_CASE : Any = num_time_features
SCREAMING_SNAKE_CASE : List[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE : Optional[int] = scaling
SCREAMING_SNAKE_CASE : str = num_dynamic_real_features
SCREAMING_SNAKE_CASE : Optional[int] = num_static_real_features
SCREAMING_SNAKE_CASE : Tuple = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
SCREAMING_SNAKE_CASE : Optional[Any] = cardinality
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCamelCase_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = embedding_dimension
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
SCREAMING_SNAKE_CASE : List[Any] = num_parallel_samples
# Transformer architecture configuration
SCREAMING_SNAKE_CASE : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features
SCREAMING_SNAKE_CASE : Dict = d_model
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE : Dict = decoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = dropout
SCREAMING_SNAKE_CASE : Any = attention_dropout
SCREAMING_SNAKE_CASE : Dict = activation_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Dict = decoder_layerdrop
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Any = init_std
SCREAMING_SNAKE_CASE : Tuple = use_cache
# Informer
SCREAMING_SNAKE_CASE : Any = attention_type
SCREAMING_SNAKE_CASE : str = sampling_factor
SCREAMING_SNAKE_CASE : Dict = distil
super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
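
# How the derived sizes above work out for a small case, computed with plain
# Python (no transformers needed): one categorical feature of cardinality 10,
# univariate input, one time feature, and the default lags.
cardinality = [10]
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]  # [5]

input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
number_of_features = (
    sum(embedding_dimension)  # 5
    + 0                       # num_dynamic_real_features
    + 1                       # num_time_features
    + 0                       # num_static_real_features
    + input_size * 2          # the log1p(abs(loc)) and log(scale) features
)                             # -> 8
feature_size = input_size * len(lags_sequence) + number_of_features  # 7 + 8 = 15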
| 698 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase__ : List[str] = 250004
lowerCamelCase__ : str = 250020
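
# What shift_tokens_right does for these seq2seq batches, as a pure-Python
# rendering of the tensor op: the last non-pad token (EOS) is copied to the
# front and everything else shifts right by one, so labels beginning with the
# target language code become decoder inputs beginning with [EOS, lang_code].
def shift_tokens_right_demo(ids: list, pad_id: int) -> list:
    last_non_pad = max(i for i, tok in enumerate(ids) if tok != pad_id)
    return [ids[last_non_pad]] + ids[:-1]

assert shift_tokens_right_demo([RO_CODE, 11, 12, 2, 1, 1], pad_id=1) == [2, RO_CODE, 11, 12, 2, 1]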
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MBartaaTokenizer
UpperCamelCase = MBartaaTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Optional[int] = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''<s>'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
SCREAMING_SNAKE_CASE : Dict = 1
return cls
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : int = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
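# A hedged reconstruction of the shift_tokens_right behaviour the assertions
# above rely on: each row of a (right-padded) torch.LongTensor is shifted one
# position to the right and its last non-pad token (eos) wraps to position 0,
# which is why decoder_input_ids start with [2, RO_CODE].
def _shift_tokens_right_sketch(input_ids , pad_token_id ):
    prev_output_tokens = input_ids.clone()
    index_of_eos = (input_ids.ne(pad_token_id ).sum(dim=1 ) - 1).unsqueeze(-1 )
    prev_output_tokens[:, 0] = input_ids.gather(1 , index_of_eos ).squeeze(-1 )
    prev_output_tokens[:, 1:] = input_ids[:, :-1]
    return prev_output_tokens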
| 698 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._create_example_records()
SCREAMING_SNAKE_CASE : List[Any] = Dataset.from_list(lowerCamelCase_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(lowerCamelCase_ ):
self.assertDictEqual(lowerCamelCase_ , example_records[i] )
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._create_example_records()
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def __lowerCAmelCase ( self :List[str] ) -> Dict: # checks what happens with missing columns
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE : List[str] = Dataset.from_list(lowerCamelCase_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
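# A minimal usage sketch of the behaviour tested above: Dataset.from_list takes
# column names from the first record, while from_dict starts from whole columns.
def _from_list_demo():
    records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
    dset = Dataset.from_list(records )
    assert dset.column_names == ["col_1", "col_2"]
    assert dset[0] == records[0]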
| 698 | 1 |
"""simple docstring"""
def __A ( list_data : list , length : int = 0 )-> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else __A(list_data , length - 1 )
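# Quick sanity checks; the worst case stays O(n^2) comparisons, but the `swapped`
# early exit turns an already-sorted input into a single O(n) pass.
def _bubble_sort_demo():
    assert __A([5, 3, 1, 4, 2] ) == [1, 2, 3, 4, 5]
    assert __A([] ) == []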
if __name__ == "__main__":
import doctest
doctest.testmod()
| 698 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def __A ( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 1_00 , )-> float:
    '''simple docstring'''
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        length += math.hypot(xa_next - xa , fxa_next - fxa )
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
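# Worked check: for f(x) = x the exact arc length on [0, 1] is sqrt(2), and the
# piecewise-linear approximation above recovers it since every chord lies on the line.
def _line_length_check():
    assert abs(__A(lambda x: x , 0 , 1 , 100 ) - math.sqrt(2 )) < 1e-9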
if __name__ == "__main__":
    def f( x : float )-> float:
        '''simple docstring'''
        return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
lowerCamelCase__ : str = 10
while i <= 100000:
        print(f'''With {i} steps: {__A(f, -10, 10, i)}''')
i *= 10
| 698 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = GPTSanJapaneseTokenizer
UpperCamelCase = False
UpperCamelCase = {"""do_clean_text""": False, """add_prefix_space""": False}
def __lowerCAmelCase ( self :int ) -> str:
'''simple docstring'''
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE : int = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
SCREAMING_SNAKE_CASE : Optional[int] = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
SCREAMING_SNAKE_CASE : List[Any] = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :List[str] , **lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
SCREAMING_SNAKE_CASE : Any = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def __lowerCAmelCase ( self :int , lowerCamelCase_ :str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.get_input_output_texts(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return text, ids
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE : Any = '''こんにちは、世界。 こんばんは、㔺界。'''
SCREAMING_SNAKE_CASE : str = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
SCREAMING_SNAKE_CASE : str = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE : int = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
SCREAMING_SNAKE_CASE : Any = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
SCREAMING_SNAKE_CASE : Any = tokenizer.encode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
SCREAMING_SNAKE_CASE : Dict = '''こんにちは、世界。'''
SCREAMING_SNAKE_CASE : Optional[int] = '''こんばんは、㔺界。😀'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''こんにちは、世界。こんばんは、世界。😀'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(prefix_text + input_text )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(lowerCamelCase_ , prefix_text=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer.decode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
SCREAMING_SNAKE_CASE : Tuple = '''こんにちは、世界。'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''こんばんは、㔺界。😀'''
SCREAMING_SNAKE_CASE : Tuple = len(tokenizer.encode(lowerCamelCase_ ) ) - 2
SCREAMING_SNAKE_CASE : Optional[int] = len(tokenizer.encode(lowerCamelCase_ ) ) - 2
SCREAMING_SNAKE_CASE : str = [1] + [0] * (len_prefix + len_text + 1)
SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
SCREAMING_SNAKE_CASE : str = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE : List[str] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(lowerCamelCase_ , prefix_text=lowerCamelCase_ ).token_type_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
SCREAMING_SNAKE_CASE : Any = tokenizer.encode('''あンいワ''' )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(lowerCamelCase_ ) , tokenizer.decode(lowerCamelCase_ ) )
self.assertEqual(tokenizer.decode(lowerCamelCase_ ) , tokenizer.decode(lowerCamelCase_ ) )
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
SCREAMING_SNAKE_CASE : Dict = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_encode_plus(lowerCamelCase_ , padding=lowerCamelCase_ )
# fmt: off
SCREAMING_SNAKE_CASE : str = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
SCREAMING_SNAKE_CASE : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
SCREAMING_SNAKE_CASE : List[str] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowerCamelCase_ )
self.assertListEqual(x_token.token_type_ids , lowerCamelCase_ )
self.assertListEqual(x_token.attention_mask , lowerCamelCase_ )
self.assertListEqual(x_token_a.input_ids , lowerCamelCase_ )
self.assertListEqual(x_token_a.token_type_ids , lowerCamelCase_ )
self.assertListEqual(x_token_a.attention_mask , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
pass
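# A hedged reconstruction of the prefix-LM token_type_ids layout asserted in the
# tests above: positions covering the prefix (plus one boundary position) are 1,
# the continuation (plus the trailing position) is 0.
def _prefix_lm_token_types_sketch(len_prefix , len_text ):
    return [1] * (len_prefix + 1) + [0] * (len_text + 1)
# e.g. _prefix_lm_token_types_sketch(2, 3) == [1, 1, 1, 0, 0, 0, 0]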
| 698 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None )-> argparse.ArgumentParser:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args )-> None:
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"--config_file={args.config_file} {script_name}"
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main()-> None:
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
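# A sketch mirroring test_command without spawning a process: it only builds the
# argv that would be handed to execute_subprocess_async (script path shortened here).
def _build_command_sketch(config_file=None ):
    script_name = "test_script.py"
    test_args = F"--config_file={config_file} {script_name}" if config_file else script_name
    return ["accelerate-launch"] + test_args.split()
# e.g. _build_command_sketch() == ["accelerate-launch", "test_script.py"]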
if __name__ == "__main__":
main()
| 698 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __A ( a_ : str , a_ : str , a_ : str , a_ : PreTrainedTokenizer , a_ : int , a_ : Optional[int] = None , )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
if train_file is not None:
SCREAMING_SNAKE_CASE : Tuple = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE : Any = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = [test_file]
SCREAMING_SNAKE_CASE : Any = datasets.load_dataset('''csv''' , data_files=a_ )
SCREAMING_SNAKE_CASE : Any = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE : str = features_name.pop(a_ )
SCREAMING_SNAKE_CASE : List[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {label: i for i, label in enumerate(a_ )}
SCREAMING_SNAKE_CASE : str = tokenizer.model_input_names
SCREAMING_SNAKE_CASE : int = {}
if len(a_ ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE : Any = ds[k].map(
lambda a_ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=a_ , max_length=a_ , padding='''max_length''' ) , batched=a_ , )
elif len(a_ ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE : Tuple = ds[k].map(
lambda a_ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=a_ , max_length=a_ , padding='''max_length''' , ) , batched=a_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE : List[Any] = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE : List[str] = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE : List[str] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE : int = (
tf.data.Dataset.from_generator(
a_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE : Tuple = (
tf.data.Dataset.from_generator(
a_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE : Any = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE : List[str] = (
tf.data.Dataset.from_generator(
a_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE : int = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCamelCase__ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(metadata={"""help""": """Which column contains the label"""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """The path of the training file"""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """The path of the development file"""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """The path of the test file"""} )
UpperCamelCase = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=a_ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(a_ ) , labelaid=a_ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , )
def compute_metrics(a_ : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE : List[str] = TFTrainer(
model=a_ , args=a_ , train_dataset=a_ , eval_dataset=a_ , compute_metrics=a_ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Dict = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : str = trainer.evaluate()
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(a_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(a_ )
return results
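# Tiny self-check of the accuracy metric defined inside main(): argmax over the
# logits compared against integer labels.
def _compute_metrics_check():
    preds = np.argmax(np.array([[0.1, 0.9], [0.8, 0.2]] ) , axis=1 )
    assert float((preds == np.array([1, 0] )).mean() ) == 1.0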
if __name__ == "__main__":
main()
| 698 |
"""simple docstring"""
def __A ( min_val : int = 10 , max_val : int = 10_00 , option : bool = True )-> int:
    '''simple docstring'''
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
    return min_val if option else max_val
def get_avg( number_a : int , number_b : int )-> int:
    '''simple docstring'''
    return int((number_a + number_b) / 2 )
def guess_the_number( lower : int , higher : int , to_guess : int )-> None:
    '''simple docstring'''
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
    if not lower < to_guess < higher:
        raise ValueError(
            '''guess value must be within the range of lower and higher value''' )
    def answer(number : int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('''started...''' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(F"guess the number : {last_numbers[-1]}" )
    print(F"details : {last_numbers!s}" )
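# Each guess halves the remaining interval, so guess_the_number terminates in at
# most ceil(log2(higher - lower)) guesses, e.g. 10 guesses for a 10..1000 range.
def _iteration_bound(lower : int , higher : int )-> int:
    import math
    return math.ceil(math.log2(higher - lower ) )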
def main()-> None:
    '''simple docstring'''
    lower = int(input('''Enter lower value : ''' ).strip() )
    higher = int(input('''Enter high value : ''' ).strip() )
    guess = int(input('''Enter value to guess : ''' ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
| 698 | 1 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ : Any = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ : str = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __A ( a_ : str )-> str:
'''simple docstring'''
    SCREAMING_SNAKE_CASE : str = re.sub('''<n>''' , '''''' , a_ ) # remove pegasus newline char; re.sub returns a new string
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE ) )
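# Usage sketch (assumes the punkt data downloaded above is available): the "<n>"
# marker is stripped and the sentences are re-joined with real newlines.
def _sentence_split_demo():
    assert __A("First sentence.<n> Second sentence." ) == "First sentence.\nSecond sentence."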
| 698 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowerCamelCase__ : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __A ( a_ : Optional[int] , a_ : str , a_ : str , a_ : str , a_ : List[str] )-> Tuple:
'''simple docstring'''
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE : Any = getattr(a_ , a_ )
if weight_type is not None:
SCREAMING_SNAKE_CASE : Optional[int] = getattr(a_ , a_ ).shape
else:
SCREAMING_SNAKE_CASE : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Optional[int] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __A ( a_ : Optional[Any] , a_ : Dict )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Optional[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = hf_model.feature_extractor
SCREAMING_SNAKE_CASE : Tuple = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : int = False
if "conv_layers" in name:
load_conv_layer(
a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE : List[str] = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Dict = name.split(a_ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE : Optional[int] = mapped_key.replace('''*''' , a_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE : List[str] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = '''bias'''
elif "weight" in name:
SCREAMING_SNAKE_CASE : Tuple = '''weight'''
else:
SCREAMING_SNAKE_CASE : str = None
set_recursively(a_ , a_ , a_ , a_ , a_ )
continue
if not is_used:
unused_weights.append(a_ )
logger.warning(F"Unused weights: {unused_weights}" )
def __A ( a_ : Dict , a_ : int , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE : List[str] = name.split('''.''' )
SCREAMING_SNAKE_CASE : Dict = int(items[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[int] , a_ : Optional[int] , a_ : Any , a_ : Any )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split('''adaptor.''' )[-1]
SCREAMING_SNAKE_CASE : List[Any] = name.split('''.''' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
else:
SCREAMING_SNAKE_CASE : str = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : int = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a_ , a_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : List[str] = value
            logger.info(F"Adapter layer {layer_id} weight was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[Any] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = emb.weight.shape
SCREAMING_SNAKE_CASE : Any = nn.Linear(a_ , a_ , bias=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = emb.weight.data
return lin_layer
@torch.no_grad()
def __A ( a_ : Tuple , a_ : Optional[int] , a_ : List[Any] , a_ : Any , a_ : Tuple , a_ : int , a_ : Any , a_ : str , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] , )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = WavaVecaConfig.from_pretrained(
a_ , add_adapter=a_ , adapter_stride=a_ , adapter_kernel_size=a_ , use_auth_token=a_ , output_hidden_size=a_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = MBartConfig.from_pretrained(a_ )
# load model
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
SCREAMING_SNAKE_CASE : int = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(a_ , use_auth_token=a_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE : str = WavaVecaModel(a_ )
recursively_load_weights_wavaveca(model.encoder , a_ )
# load decoder weights
SCREAMING_SNAKE_CASE : Dict = MBartForCausalLM(a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a_ )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechEncoderDecoderModel(encoder=a_ , decoder=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer(a_ )
tokenizer.save_pretrained(a_ )
SCREAMING_SNAKE_CASE : Tuple = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE : Any = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : List[str] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = '''mbart50'''
SCREAMING_SNAKE_CASE : Optional[int] = '''wav2vec2'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : List[str] = 25_00_04
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Any = SpeechEncoderDecoderConfig.from_dict(a_ )
hf_wavavec.save_pretrained(a_ )
feature_extractor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
    parser.add_argument("--add_adapter", default=True, type=bool, help="whether to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
lowerCamelCase__ : Dict = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
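# Typical invocation (the script filename and all paths below are placeholders,
# not values taken from this file):
#
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50
#
# The remaining flags fall back to the defaults declared above.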
| 698 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a : np.ndarray , input_b : np.ndarray )-> float:
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def __A ( dataset : np.ndarray , value_array : np.ndarray )-> list[list[list[float] | float]]:
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            '''Wrong input data\'s dimensions... '''
            F"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                '''Wrong input data\'s shape... '''
                F"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''' )
    if dataset.dtype != value_array.dtype:
        msg = (
            '''Input data have different datatype... '''
            F"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a : np.ndarray , input_b : np.ndarray )-> float:
    '''simple docstring'''
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
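# Small end-to-end check of the helpers above (array values are illustrative).
def _similarity_demo():
    dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]] )
    value_array = np.array([[0.0, 0.0, 0.9]] )
    vector, dist = __A(dataset , value_array )[0]
    assert vector == [0.0, 0.0, 0.0] and abs(dist - 0.9 ) < 1e-9
    # Parallel vectors have cosine similarity 1 regardless of magnitude.
    assert abs(cosine_similarity(np.array([1.0, 2.0] ) , np.array([2.0, 4.0] )) - 1.0 ) < 1e-12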
if __name__ == "__main__":
import doctest
doctest.testmod()
| 698 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_m2m_100 import (
        M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
        M2M100ForConditionalGeneration,
        M2M100Model,
        M2M100PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
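# With this layout, `import transformers.models.m2m_100` stays cheap: _LazyModule
# replaces the module object in sys.modules and only performs the heavy,
# torch-backed imports on first attribute access (e.g. touching M2M100Model).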
| 698 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = GPTaTokenizer
UpperCamelCase = GPTaTokenizerFast
UpperCamelCase = True
UpperCamelCase = {"""add_prefix_space""": True}
UpperCamelCase = False
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
SCREAMING_SNAKE_CASE : List[str] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE : Optional[int] = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[int] , **lowerCamelCase_ :int ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , **lowerCamelCase_ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''lower newer'''
SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : List[str] = '''lower newer'''
SCREAMING_SNAKE_CASE : str = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize(lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = '''lower newer'''
# Testing tokenization
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing the unknown token
SCREAMING_SNAKE_CASE : List[str] = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :int , *lowerCamelCase_ :Dict , **lowerCamelCase_ :str ) -> Optional[Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Optional[int]=15 ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# Simple input
SCREAMING_SNAKE_CASE : str = '''This is a simple input'''
SCREAMING_SNAKE_CASE : List[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE : Tuple = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE : List[Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding='''max_length''' , )
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
SCREAMING_SNAKE_CASE : Tuple = '''This is a simple input'''
SCREAMING_SNAKE_CASE : List[Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
SCREAMING_SNAKE_CASE : Dict = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE : int = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(lowerCamelCase_ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncate=lowerCamelCase_ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Dict = tokenizer(*lowerCamelCase_ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , truncate=lowerCamelCase_ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''$$$'''
SCREAMING_SNAKE_CASE : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase_ , add_bos_token=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = '''This is a simple input'''
SCREAMING_SNAKE_CASE : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCamelCase_ )
self.assertEqual(out_s.input_ids[0] , lowerCamelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCamelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.get_tokenizer(do_lower_case=lowerCamelCase_ , add_bos_token=lowerCamelCase_ )]
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE : List[str] = '''Encode this.'''
SCREAMING_SNAKE_CASE : str = '''This one too please.'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
encoded_sequence += tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode_plus(
lowerCamelCase_ , lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = encoded_sequence_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Tuple = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCamelCase_ )
]
SCREAMING_SNAKE_CASE : int = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = '''A photo of a cat'''
SCREAMING_SNAKE_CASE : str = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''test_opt''' )
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained('''./test_opt''' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = '''A photo of a cat'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(
lowerCamelCase_ , )
# Same as above
self.assertEqual(lowerCamelCase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = '''bos'''
SCREAMING_SNAKE_CASE : str = tokenizer.get_vocab()['''bos''']
SCREAMING_SNAKE_CASE : List[Any] = '''A photo of a cat'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
lowerCamelCase_ , )
# We changed the bos token
self.assertEqual(lowerCamelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''./tok''' )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(
lowerCamelCase_ , )
self.assertEqual(lowerCamelCase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 698 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :Optional[int]="auto" , lowerCamelCase_ :Dict=-1 , lowerCamelCase_ :str=0.9 , lowerCamelCase_ :str=5 , lowerCamelCase_ :Tuple=5_00 , lowerCamelCase_ :str="gpt2-large" , lowerCamelCase_ :List[Any]=-1 , lowerCamelCase_ :Dict=10_24 , lowerCamelCase_ :Tuple=25 , lowerCamelCase_ :List[Any]=5 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=25 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = compute_mauve(
p_text=lowerCamelCase_ , q_text=lowerCamelCase_ , p_features=lowerCamelCase_ , q_features=lowerCamelCase_ , p_tokens=lowerCamelCase_ , q_tokens=lowerCamelCase_ , num_buckets=lowerCamelCase_ , pca_max_data=lowerCamelCase_ , kmeans_explained_var=lowerCamelCase_ , kmeans_num_redo=lowerCamelCase_ , kmeans_max_iter=lowerCamelCase_ , featurize_model_name=lowerCamelCase_ , device_id=lowerCamelCase_ , max_text_length=lowerCamelCase_ , divergence_curve_discretization_size=lowerCamelCase_ , mauve_scaling_factor=lowerCamelCase_ , verbose=lowerCamelCase_ , seed=lowerCamelCase_ , )
return out
| 698 | 1 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
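# Usage sketch (illustrative addition, not part of the original module): assuming these
# re-exports mirror the upstream transformers SQuAD processors (SquadV1Processor /
# SquadV2Processor there), a processor turns SQuAD-style JSON into SquadExample objects.
if __name__ == "__main__":
    from transformers.data.processors.squad import SquadV1Processor

    processor = SquadV1Processor()
    examples = processor.get_dev_examples("./squad_data")  # assumes dev-v1.1.json lives in this directory
    print(len(examples))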
| 698 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : str = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Optional[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase__ : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
lowerCamelCase__ : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ : int = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class lowercase__:
'''simple docstring'''
def __call__( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = None , **lowerCamelCase_ :Tuple , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : List[str] = titles if texts is None else texts
return super().__call__(
lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [titles]
SCREAMING_SNAKE_CASE : Dict = texts if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [texts]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = questions if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [questions] * n_passages
assert len(lowerCamelCase_ ) == len(
lowerCamelCase_ ), f"There should be as many titles than texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts."
SCREAMING_SNAKE_CASE : Any = super().__call__(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : Dict = super().__call__(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase_ , lowerCamelCase_ )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : int = attention_mask
return self.pad(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :BatchEncoding , lowerCamelCase_ :DPRReaderOutput , lowerCamelCase_ :int = 16 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = reader_output[:3]
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(range(lowerCamelCase_ ) , reverse=lowerCamelCase_ , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase_ , top_spans=lowerCamelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase_ , start_index=lowerCamelCase_ , end_index=lowerCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for start_index, start_score in enumerate(lowerCamelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE : Dict = sorted(lowerCamelCase_ , key=lambda x : x[1] , reverse=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
SCREAMING_SNAKE_CASE : Optional[int] = end_index - start_index + 1
assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = DPRReaderTokenizer
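# Usage sketch (illustrative addition, not part of the original module): assuming this
# class mirrors transformers' DPRReaderTokenizerFast, the call below builds the
# [CLS] <question> [SEP] <title> [SEP] <text> layout documented in the docstring above.
if __name__ == "__main__":
    from transformers import DPRReaderTokenizerFast

    reader_tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = reader_tokenizer(
        questions=["What does DPR stand for?"],
        titles=["Dense Passage Retrieval"],
        texts=["DPR stands for Dense Passage Retrieval."],
        return_tensors="pt",
    )
    print(encoded["input_ids"].shape)  # (n_passages, sequence_length)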
| 698 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __A ( checkpoint : Dict , a_ : Dict )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = checkpoint
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''encoder.conv_in.weight''']
SCREAMING_SNAKE_CASE : Any = vae_state_dict['''encoder.conv_in.bias''']
SCREAMING_SNAKE_CASE : int = vae_state_dict['''encoder.conv_out.weight''']
SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''encoder.conv_out.bias''']
SCREAMING_SNAKE_CASE : Optional[Any] = vae_state_dict['''encoder.norm_out.weight''']
SCREAMING_SNAKE_CASE : Tuple = vae_state_dict['''encoder.norm_out.bias''']
SCREAMING_SNAKE_CASE : Optional[int] = vae_state_dict['''decoder.conv_in.weight''']
SCREAMING_SNAKE_CASE : Optional[int] = vae_state_dict['''decoder.conv_in.bias''']
SCREAMING_SNAKE_CASE : Dict = vae_state_dict['''decoder.conv_out.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = vae_state_dict['''decoder.conv_out.bias''']
SCREAMING_SNAKE_CASE : Tuple = vae_state_dict['''decoder.norm_out.weight''']
SCREAMING_SNAKE_CASE : str = vae_state_dict['''decoder.norm_out.bias''']
SCREAMING_SNAKE_CASE : List[Any] = vae_state_dict['''quant_conv.weight''']
SCREAMING_SNAKE_CASE : Tuple = vae_state_dict['''quant_conv.bias''']
SCREAMING_SNAKE_CASE : Optional[Any] = vae_state_dict['''post_quant_conv.weight''']
SCREAMING_SNAKE_CASE : int = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
SCREAMING_SNAKE_CASE : List[str] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
SCREAMING_SNAKE_CASE : List[Any] = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(a_ )
}
# Retrieves the keys for the decoder up blocks only
SCREAMING_SNAKE_CASE : List[str] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
SCREAMING_SNAKE_CASE : Tuple = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(a_ )
}
for i in range(a_ ):
SCREAMING_SNAKE_CASE : str = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
SCREAMING_SNAKE_CASE : str = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
SCREAMING_SNAKE_CASE : Dict = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
SCREAMING_SNAKE_CASE : List[Any] = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : List[str] = {'''old''': F"down.{i}.block", '''new''': F"down_blocks.{i}.resnets"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : List[Any] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
SCREAMING_SNAKE_CASE : Optional[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
SCREAMING_SNAKE_CASE : Dict = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
SCREAMING_SNAKE_CASE : int = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : int = {'''old''': F"mid.block_{i}", '''new''': F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : str = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
SCREAMING_SNAKE_CASE : Union[str, Any] = renew_vae_attention_paths(a_ )
SCREAMING_SNAKE_CASE : Tuple = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
conv_attn_to_linear(a_ )
for i in range(a_ ):
SCREAMING_SNAKE_CASE : str = num_up_blocks - 1 - i
SCREAMING_SNAKE_CASE : List[str] = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
SCREAMING_SNAKE_CASE : str = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
SCREAMING_SNAKE_CASE : Union[str, Any] = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
SCREAMING_SNAKE_CASE : Optional[Any] = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : Tuple = {'''old''': F"up.{block_id}.block", '''new''': F"up_blocks.{i}.resnets"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : Any = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
SCREAMING_SNAKE_CASE : Dict = 2
for i in range(1 , num_mid_res_blocks + 1 ):
SCREAMING_SNAKE_CASE : Optional[Any] = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
SCREAMING_SNAKE_CASE : str = renew_vae_resnet_paths(a_ )
SCREAMING_SNAKE_CASE : List[str] = {'''old''': F"mid.block_{i}", '''new''': F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
SCREAMING_SNAKE_CASE : str = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
SCREAMING_SNAKE_CASE : List[Any] = renew_vae_attention_paths(a_ )
SCREAMING_SNAKE_CASE : str = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a_ , a_ , a_ , additional_replacements=[meta_path] , config=a_ )
conv_attn_to_linear(a_ )
return new_checkpoint
def __A ( checkpoint_path : str , a_ : str , )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = requests.get(
'''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
SCREAMING_SNAKE_CASE : int = io.BytesIO(r.content )
SCREAMING_SNAKE_CASE : Any = OmegaConf.load(a_ )
SCREAMING_SNAKE_CASE : str = 5_12
SCREAMING_SNAKE_CASE : str = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
SCREAMING_SNAKE_CASE : List[str] = {}
with safe_open(a_ , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = f.get_tensor(a_ )
else:
SCREAMING_SNAKE_CASE : Any = torch.load(a_ , map_location=a_ )['''state_dict''']
# Convert the VAE model.
SCREAMING_SNAKE_CASE : Any = create_vae_diffusers_config(a_ , image_size=a_ )
SCREAMING_SNAKE_CASE : Dict = custom_convert_ldm_vae_checkpoint(a_ , a_ )
SCREAMING_SNAKE_CASE : Tuple = AutoencoderKL(**a_ )
vae.load_state_dict(a_ )
vae.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
lowerCamelCase__ : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
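# Example invocation (illustrative addition; the script filename is an assumption):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.ckpt \
#       --dump_path ./converted_vae
#
# The dumped weights can then be reloaded with diffusers:
#
#   from diffusers import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("./converted_vae")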
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """markuplm"""
def __init__( self :int , lowerCamelCase_ :List[str]=3_05_22 , lowerCamelCase_ :Union[str, Any]=7_68 , lowerCamelCase_ :str=12 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :str=30_72 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Any=1E-12 , lowerCamelCase_ :Dict=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :str=2_56 , lowerCamelCase_ :List[Any]=10_24 , lowerCamelCase_ :Union[str, Any]=2_16 , lowerCamelCase_ :Dict=10_01 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :str=50 , lowerCamelCase_ :List[str]="absolute" , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :int=None , **lowerCamelCase_ :Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : Optional[Any] = max_depth
SCREAMING_SNAKE_CASE : Dict = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : Tuple = tag_pad_id
SCREAMING_SNAKE_CASE : str = subs_pad_id
SCREAMING_SNAKE_CASE : List[Any] = xpath_unit_hidden_size
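# Usage sketch (illustrative addition, not part of the original module): assuming this
# class mirrors transformers' MarkupLMConfig, a randomly initialized model can be
# built from a customized config.
if __name__ == "__main__":
    from transformers import MarkupLMConfig, MarkupLMModel

    config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
    model = MarkupLMModel(config)  # randomly initialized weights
    print(config.max_depth)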
| 698 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = XLMTokenizer
UpperCamelCase = False
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE : List[str] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : str = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''lower newer'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = XLMTokenizer(self.vocab_file , self.merges_file )
SCREAMING_SNAKE_CASE : Any = '''lower'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''low''', '''er</w>''']
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokens + ['''<unk>''']
SCREAMING_SNAKE_CASE : Dict = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 698 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """resnet"""
UpperCamelCase = ["""basic""", """bottleneck"""]
def __init__( self :Optional[int] , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :Tuple=64 , lowerCamelCase_ :Union[str, Any]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase_ :int=[3, 4, 6, 3] , lowerCamelCase_ :Any="bottleneck" , lowerCamelCase_ :Optional[int]="relu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=None , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : List[Any] = layer_type
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = downsample_in_first_stage
SCREAMING_SNAKE_CASE : int = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :str ) -> float:
'''simple docstring'''
return 1E-3
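# Usage sketch (illustrative addition, not part of the original module): assuming this
# class mirrors transformers' ResNetConfig, out_features selects which of the named
# stages (the stage_names computed in __init__ above) a backbone exposes.
if __name__ == "__main__":
    from transformers import ResNetConfig, ResNetModel

    config = ResNetConfig(layer_type="bottleneck", out_features=["stage2", "stage4"])
    model = ResNetModel(config)  # randomly initialized weights
    print(config.stage_names)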
| 698 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = None
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 2
@register_to_config
def __init__( self :List[Any] , lowerCamelCase_ :float = 0.0_2 , lowerCamelCase_ :float = 1_00 , lowerCamelCase_ :float = 1.0_0_7 , lowerCamelCase_ :float = 80 , lowerCamelCase_ :float = 0.0_5 , lowerCamelCase_ :float = 50 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = sigma_max
# setable values
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : np.IntTensor = None
SCREAMING_SNAKE_CASE : torch.FloatTensor = None # sigma(t_i)
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, torch.device] = None ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = num_inference_steps
SCREAMING_SNAKE_CASE : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy()
SCREAMING_SNAKE_CASE : Any = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
SCREAMING_SNAKE_CASE : int = torch.tensor(lowerCamelCase_ , dtype=torch.floataa , device=lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :float , lowerCamelCase_ :Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
SCREAMING_SNAKE_CASE : Tuple = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
SCREAMING_SNAKE_CASE : List[str] = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCamelCase_ ).to(sample.device )
SCREAMING_SNAKE_CASE : List[Any] = sigma + gamma * sigma
SCREAMING_SNAKE_CASE : int = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = sample_hat + sigma_hat * model_output
SCREAMING_SNAKE_CASE : Tuple = (sample_hat - pred_original_sample) / sigma_hat
SCREAMING_SNAKE_CASE : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCamelCase_ , derivative=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = sample_prev + sigma_prev * model_output
SCREAMING_SNAKE_CASE : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
SCREAMING_SNAKE_CASE : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCamelCase_ , derivative=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
raise NotImplementedError()
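# Usage sketch (illustrative addition, not part of the original module): assuming this
# class mirrors diffusers' KarrasVeScheduler, a sampling loop pairs add_noise_to_input
# with step (and optionally step_correct for the second-order update).
if __name__ == "__main__":
    import torch
    from diffusers import KarrasVeScheduler

    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(num_inference_steps=50)
    sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        # a denoising model would produce model_output here; scheduler.step() then
        # takes (model_output, sigma_hat, sigma_prev, sample_hat) to yield prev_sample
        break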
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mra"""
def __init__( self :int , lowerCamelCase_ :Optional[int]=5_02_65 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :List[Any]="absolute" , lowerCamelCase_ :str=4 , lowerCamelCase_ :List[str]="full" , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :List[Any]=2 , **lowerCamelCase_ :str , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = block_per_row
SCREAMING_SNAKE_CASE : Optional[int] = approx_mode
SCREAMING_SNAKE_CASE : List[Any] = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE : Union[str, Any] = initial_prior_diagonal_n_blocks
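# Usage sketch (illustrative addition, not part of the original module): assuming this
# class mirrors transformers' MraConfig.
if __name__ == "__main__":
    from transformers import MraConfig, MraModel

    config = MraConfig(approx_mode="full", block_per_row=4)
    model = MraModel(config)  # randomly initialized weights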
| 698 | 1 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ : List[Any] = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :int=1 ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tokenizer
SCREAMING_SNAKE_CASE : Any = dataset
SCREAMING_SNAKE_CASE : List[Any] = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE : Dict = n_copies
def __iter__( self :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
SCREAMING_SNAKE_CASE : str = self.tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = start_length
SCREAMING_SNAKE_CASE : Dict = eof_strings
SCREAMING_SNAKE_CASE : int = tokenizer
def __call__( self :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , **lowerCamelCase_ :Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE : List[str] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def __A ( a_ : str )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = re.split('''(%s)''' % '''|'''.join(a_ ) , a_ )
# last string should be ""
return "".join(string_list[:-2] )
def __A ( accelerator : List[str] , model : str , tokenizer : str , a_ : Dict , n_tasks : Optional[Any] , batch_size : str=20 , **gen_kwargs : Tuple )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = defaultdict(a_ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(a_ ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE : List[str] = accelerator.unwrap_model(a_ ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=a_ , **a_ )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE : Optional[int] = batch['''task_id'''].repeat(a_ )
SCREAMING_SNAKE_CASE : Any = accelerator.pad_across_processes(
a_ , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE : Any = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(a_ , a_ ):
gen_token_dict[task].append(a_ )
SCREAMING_SNAKE_CASE : Dict = [[] for _ in range(a_ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ )
code_gens[task].append(remove_last_block(a_ ) )
return code_gens
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = HfArgumentParser(a_ )
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE : str = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE : Optional[int] = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE : List[str] = Accelerator()
set_seed(args.seed , device_specific=a_ )
# Load model and tokenizer
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.eos_token
SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE : int = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , a_ , a_ )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE : Dict = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE : List[str] = TokenizedDataset(a_ , human_eval['''test'''] , n_copies=a_ , n_tasks=a_ )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE : Dict = DataLoader(a_ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE : Optional[int] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(a_ , a_ )
SCREAMING_SNAKE_CASE : int = complete_code(
a_ , a_ , a_ , a_ , n_tasks=a_ , batch_size=args.batch_size , **a_ , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE : int = []
for task in tqdm(range(a_ ) ):
SCREAMING_SNAKE_CASE : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE : List[str] = F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = code_eval_metric.compute(
references=a_ , predictions=a_ , num_workers=args.num_workers )
print(F"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(a_ , a_ )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
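# Example invocation (illustrative addition; the script filename and checkpoint are
# assumptions — the flags correspond to the HumanEvalArguments fields used above):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot \
#       --do_sample True \
#       --temperature 0.2 \
#       --n_samples 200 \
#       --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 \
#       --output_file eval_results.json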
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """nllb-moe"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[str] , lowerCamelCase_ :Optional[int]=12_81_12 , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :Any=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[int]=40_96 , lowerCamelCase_ :int=16 , lowerCamelCase_ :Union[str, Any]=0.0_5 , lowerCamelCase_ :Optional[int]=0.0_5 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Tuple="relu" , lowerCamelCase_ :str=10_24 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :Optional[Any]="float32" , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[Any]=1_28 , lowerCamelCase_ :Any=64 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Union[str, Any]=0.0_0_1 , lowerCamelCase_ :Optional[int]=0.0_0_1 , lowerCamelCase_ :List[str]="all" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=False , lowerCamelCase_ :Tuple=1.0 , lowerCamelCase_ :Union[str, Any]=0.2 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :int=2 , lowerCamelCase_ :List[str]=False , **lowerCamelCase_ :int , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : str = d_model
SCREAMING_SNAKE_CASE : Optional[int] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Any = encoder_layers
SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : List[str] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Any = activation_function
SCREAMING_SNAKE_CASE : Tuple = init_std
SCREAMING_SNAKE_CASE : str = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
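# Mixture-of-Experts routing hyperparameters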
SCREAMING_SNAKE_CASE : int = router_z_loss_coef
SCREAMING_SNAKE_CASE : Any = router_aux_loss_coef
SCREAMING_SNAKE_CASE : str = decoder_sparse_step
SCREAMING_SNAKE_CASE : str = encoder_sparse_step
SCREAMING_SNAKE_CASE : List[str] = num_experts
SCREAMING_SNAKE_CASE : Union[str, Any] = expert_capacity
SCREAMING_SNAKE_CASE : Tuple = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = router_dtype
SCREAMING_SNAKE_CASE : Union[str, Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : int = batch_prioritized_routing
SCREAMING_SNAKE_CASE : Optional[int] = second_expert_policy
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE : Any = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE : Optional[Any] = moe_token_dropout
SCREAMING_SNAKE_CASE : Tuple = output_router_logits
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
| 698 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer('''This is me''' , return_tensors='''pt''' )
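# Swap in BetterTransformer modules and verify the conversion took effect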
SCREAMING_SNAKE_CASE : Optional[int] = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE : List[Any] = model.generate(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
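# After reverting, save/reload must yield a plain Transformers model with identical generations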
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE : Dict = model_reloaded.generate(**lowerCamelCase_ )
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model.to_bettertransformer()
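# Saving a model that is still converted to BetterTransformer is expected to raise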
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCamelCase_ ):
model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.reverse_bettertransformer()
model.save_pretrained(lowerCamelCase_ )
| 698 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ : Optional[Any] = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ : Dict = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ : List[str] = "CompVis/stable-diffusion-v1-4"
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :AutoencoderKL , lowerCamelCase_ :CLIPTextModel , lowerCamelCase_ :CLIPTokenizer , lowerCamelCase_ :UNetaDConditionModel , lowerCamelCase_ :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ :StableDiffusionSafetyChecker , lowerCamelCase_ :CLIPImageProcessor , lowerCamelCase_ :bool = True , ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , requires_safety_checker=lowerCamelCase_ , )
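# Register the four sub-pipelines so device moves and serialization handle them together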
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self :Dict ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , lowerCamelCase_ ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Union[str, int]] = "auto" ) -> Tuple:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[str] , ) -> Tuple:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Tuple , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Dict , ) -> List[str]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, List[str]] , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :float = 7.5 , lowerCamelCase_ :Optional[Union[str, List[str]]] = None , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :Optional[torch.Generator] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ :int = 1 , **lowerCamelCase_ :Optional[Any] , ) -> Tuple:
'''simple docstring'''
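# Pick CUDA when available and move all registered sub-pipelines to that device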
SCREAMING_SNAKE_CASE : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(lowerCamelCase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Tuple = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : Union[str, Any] = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 698 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """instructblip_vision_model"""
def __init__( self :Any , lowerCamelCase_ :Dict=14_08 , lowerCamelCase_ :Tuple=61_44 , lowerCamelCase_ :Union[str, Any]=39 , lowerCamelCase_ :List[Any]=16 , lowerCamelCase_ :int=2_24 , lowerCamelCase_ :Any=14 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :List[Any]=1E-6 , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Tuple=1E-10 , lowerCamelCase_ :str=True , **lowerCamelCase_ :Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : int = attention_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : List[str] = qkv_bias
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , lowerCamelCase_ :Union[str, os.PathLike] , **lowerCamelCase_ :Any ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """instructblip_qformer"""
def __init__( self :int , lowerCamelCase_ :Union[str, Any]=3_05_22 , lowerCamelCase_ :int=7_68 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Optional[Any]=5_12 , lowerCamelCase_ :Any=0.0_2 , lowerCamelCase_ :Union[str, Any]=1E-12 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Union[str, Any]="absolute" , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :Optional[int]=14_08 , **lowerCamelCase_ :Tuple , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = position_embedding_type
SCREAMING_SNAKE_CASE : Tuple = cross_attention_frequency
SCREAMING_SNAKE_CASE : List[Any] = encoder_hidden_size
@classmethod
def __lowerCAmelCase ( cls :Optional[int] , lowerCamelCase_ :Union[str, os.PathLike] , **lowerCamelCase_ :Any ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
SCREAMING_SNAKE_CASE : int = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """instructblip"""
UpperCamelCase = True
def __init__( self :Tuple , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :Tuple=32 , **lowerCamelCase_ :Tuple ) -> str:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if vision_config is None:
SCREAMING_SNAKE_CASE : List[Any] = {}
logger.info('''vision_config is None. Initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
SCREAMING_SNAKE_CASE : int = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
SCREAMING_SNAKE_CASE : Dict = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
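# Build the three sub-configs; the text backbone type falls back to OPT when unspecified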
SCREAMING_SNAKE_CASE : List[str] = InstructBlipVisionConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = InstructBlipQFormerConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAPPING[text_model_type](**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE : Dict = self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE : Optional[Any] = num_query_tokens
SCREAMING_SNAKE_CASE : Tuple = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE : str = 1.0
SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_2
@classmethod
def __lowerCAmelCase ( cls :Union[str, Any] , lowerCamelCase_ :InstructBlipVisionConfig , lowerCamelCase_ :InstructBlipQFormerConfig , lowerCamelCase_ :PretrainedConfig , **lowerCamelCase_ :Optional[int] , ) -> Optional[int]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCamelCase_ , )
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE : str = self.text_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
| 698 |
"""simple docstring"""
def __A ( a_ : list , a_ : int = 0 )-> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = length or len(a_ )
SCREAMING_SNAKE_CASE : List[Any] = False
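# One pass of adjacent swaps floats the largest remaining element to the end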
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = list_data[i + 1], list_data[i]
SCREAMING_SNAKE_CASE : Optional[Any] = True
return list_data if not swapped else bubble_sort(a_ , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 698 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""transformers""", """torch""", """note_seq"""]
def __init__( self :List[str] , *lowerCamelCase_ :int , **lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls :Dict , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :str ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls :Any , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 698 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :ScoreSdeVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :int , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 20_00 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : List[str] = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : Any = self.unet
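# Start sampling from Gaussian noise scaled by the scheduler's initial sigma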
SCREAMING_SNAKE_CASE : Dict = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : List[str] = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
| 698 | 1 |
"""simple docstring"""
import os
def __A ( )-> str:
'''simple docstring'''
with open(os.path.dirname(a_ ) + '''/grid.txt''' ) as f:
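# Parse the 20x20 grid: one row of space-separated integers per line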
SCREAMING_SNAKE_CASE : Union[str, Any] = [] # noqa: E741
for _ in range(20 ):
l.append([int(a_ ) for x in f.readline().split()] )
SCREAMING_SNAKE_CASE : int = 0
# right
for i in range(20 ):
for j in range(17 ):
SCREAMING_SNAKE_CASE : Any = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
SCREAMING_SNAKE_CASE : int = temp
# down
for i in range(17 ):
for j in range(20 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
SCREAMING_SNAKE_CASE : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
SCREAMING_SNAKE_CASE : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
SCREAMING_SNAKE_CASE : Optional[int] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
SCREAMING_SNAKE_CASE : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
SCREAMING_SNAKE_CASE : Tuple = temp
return maximum
if __name__ == "__main__":
print(solution())
| 698 |
"""simple docstring"""
import qiskit
def __A ( a_ : int , a_ : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE : str = qiskit.QuantumCircuit(a_ , a_ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE : int = qiskit.execute(a_ , a_ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(a_ )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 698 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : List[str] = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 698 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ : Optional[int] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __A ( a_ : Dict )-> str:
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __A ( a_ : Dict )-> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(a_ )
def __A ( a_ : Union[str, Any] )-> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE : List[str] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(a_ , id=a_ )
def __A ( a_ : Dict , a_ : List[str] )-> Dict:
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE : List[str] = 0
# Doctest custom flag to ignore output.
lowerCamelCase__ : Tuple = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase__ : Optional[int] = doctest.OutputChecker
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] ) -> Dict:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : str = CustomOutputChecker
lowerCamelCase__ : Any = HfDoctestModule
lowerCamelCase__ : int = HfDocTestParser
| 698 | 1 |
"""simple docstring"""
import cmath
import math
def __A ( a_ : float , a_ : float , a_ : float , a_ : float )-> complex:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = math.radians(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = math.radians(a_ )
# Convert voltage and current to rectangular form
SCREAMING_SNAKE_CASE : Optional[int] = cmath.rect(a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = cmath.rect(a_ , a_ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 698 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=13 , lowerCamelCase_ :List[str]=7 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :str=99 , lowerCamelCase_ :Optional[Any]=32 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :int=4 , lowerCamelCase_ :Optional[Any]=37 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :int=3 , lowerCamelCase_ :List[Any]=4 , lowerCamelCase_ :Optional[Any]=None , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : str = 13
SCREAMING_SNAKE_CASE : str = 7
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Any = 99
SCREAMING_SNAKE_CASE : Dict = 3_84
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Any = 37
SCREAMING_SNAKE_CASE : List[str] = '''gelu'''
SCREAMING_SNAKE_CASE : List[str] = 0.1
SCREAMING_SNAKE_CASE : int = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = 5_12
SCREAMING_SNAKE_CASE : int = 16
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Tuple = 0.0_2
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
SCREAMING_SNAKE_CASE : str = 1_28
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = 9
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : List[str] = None
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
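# Assemble a small ConvBERT config from the tester's hyperparameters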
SCREAMING_SNAKE_CASE : List[str] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Dict = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertForMaskedLM(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = TFConvBertForSequenceClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertForMultipleChoice(config=lowerCamelCase_ )
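# Duplicate each input along a new choice axis: (batch_size, num_choices, seq_length)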
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForTokenClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFConvBertForQuestionAnswering(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = True
if hasattr(lowerCamelCase_ , '''use_cache''' ):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : str = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , '''saved_model''' , '''1''' )
SCREAMING_SNAKE_CASE : Tuple = tf.keras.models.load_model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[int] = outputs['''encoder_hidden_states''']
SCREAMING_SNAKE_CASE : str = outputs['''encoder_attentions''']
else:
SCREAMING_SNAKE_CASE : List[str] = outputs['''hidden_states''']
SCREAMING_SNAKE_CASE : List[Any] = outputs['''attentions''']
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
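# ConvBERT routes half of the attention heads to a span-based convolution branch, hence num_attention_heads / 2 in the shape checks below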
def check_decoder_attentions_output(lowerCamelCase_ :Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
SCREAMING_SNAKE_CASE : int = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase_ :Optional[int] ):
SCREAMING_SNAKE_CASE : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
SCREAMING_SNAKE_CASE : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 698 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def __A ( a_ : float , a_ : int )-> float:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = u
for i in range(1 , a_ ):
SCREAMING_SNAKE_CASE : List[Any] = temp * (u - i)
return temp
def __A ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('''enter the numbers of values: ''' ) )
SCREAMING_SNAKE_CASE : list[list[float]] = []
for _ in range(a_ ):
y.append([] )
for i in range(a_ ):
for j in range(a_ ):
y[i].append(a_ )
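# y is the n x n forward-difference table; column 0 is filled with the sampled values below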
SCREAMING_SNAKE_CASE : Any = 0
print('''enter the values of parameters in a list: ''' )
SCREAMING_SNAKE_CASE : List[Any] = list(map(a_ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(a_ ):
SCREAMING_SNAKE_CASE : str = float(input() )
SCREAMING_SNAKE_CASE : int = int(input('''enter the value to interpolate: ''' ) )
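# u = (x - x0) / h, the normalized offset in Newton's forward-difference formula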
SCREAMING_SNAKE_CASE : Optional[int] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a_ ):
for j in range(n - i ):
SCREAMING_SNAKE_CASE : str = y[j + 1][i - 1] - y[j][i - 1]
SCREAMING_SNAKE_CASE : List[str] = y[0][0]
for i in range(1 , a_ ):
summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ )
print(F"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
| 698 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """bert"""
def __init__( self :Any , lowerCamelCase_ :List[Any]=3_05_22 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :Tuple=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :int=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :int="absolute" , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=None , **lowerCamelCase_ :List[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
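# Usage sketch via the public API these classes mirror (an assumption: the
# `transformers` package is installed; not tied to the obfuscated names above).
if __name__ == "__main__":
    from transformers import BertConfig

    config = BertConfig(num_hidden_layers=6, num_attention_heads=8)  # override two defaults
    print(config.hidden_size)               # 768, the default kept above
    print(config.position_embedding_type)   # "absolute"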
| 698 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """bit"""
UpperCamelCase = ["""preactivation""", """bottleneck"""]
UpperCamelCase = ["""SAME""", """VALID"""]
def __init__( self :Union[str, Any] , lowerCamelCase_ :int=3 , lowerCamelCase_ :List[Any]=64 , lowerCamelCase_ :Any=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase_ :Union[str, Any]=[3, 4, 6, 3] , lowerCamelCase_ :Dict="preactivation" , lowerCamelCase_ :List[Any]="relu" , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Union[str, Any]=32 , lowerCamelCase_ :List[Any]=0.0 , lowerCamelCase_ :Any=False , lowerCamelCase_ :List[Any]=32 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Optional[Any]=None , **lowerCamelCase_ :str , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
SCREAMING_SNAKE_CASE : Any = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Tuple = embedding_size
SCREAMING_SNAKE_CASE : Dict = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : Tuple = layer_type
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = global_padding
SCREAMING_SNAKE_CASE : int = num_groups
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : Any = embedding_dynamic_padding
SCREAMING_SNAKE_CASE : List[Any] = output_stride
SCREAMING_SNAKE_CASE : List[str] = width_factor
SCREAMING_SNAKE_CASE : Any = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
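# Usage sketch for the public class this file corresponds to (an assumption:
# `transformers` exposes `BitConfig` with these parameters).
if __name__ == "__main__":
    from transformers import BitConfig

    config = BitConfig(layer_type="bottleneck", global_padding="same")
    print(config.global_padding)  # "SAME" - normalised by the padding check above
    print(config.stage_names)     # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']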
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :List[Any] , lowerCamelCase_ :Optional[int]=5_02_67 , lowerCamelCase_ :List[Any]=50_00_00 , lowerCamelCase_ :str=7_68 , lowerCamelCase_ :Optional[Any]=2_56 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :Any=30_72 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int=None , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :str=0 , lowerCamelCase_ :int=2 , **lowerCamelCase_ :List[str] , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = entity_vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Dict = entity_emb_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : str = classifier_dropout
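# Round-trip sketch using the mirrored public class (an assumption:
# `transformers` exposes `LukeConfig`; `to_dict`/`from_dict` come from
# `PretrainedConfig`).
if __name__ == "__main__":
    from transformers import LukeConfig

    config = LukeConfig(entity_vocab_size=10_000, use_entity_aware_attention=True)
    restored = LukeConfig.from_dict(config.to_dict())
    assert restored.entity_vocab_size == 10_000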
| 698 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list( shape , scale=1.0 , rng=None , name=None )-> list:
    '''simple docstring'''
    # creates a random float32 tensor as a list of lists of floats
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :List[Any]=4_00 , lowerCamelCase_ :List[str]=20_00 , lowerCamelCase_ :str=1 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Any=1_60_00 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :str=True , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Dict = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : int = feature_size
SCREAMING_SNAKE_CASE : List[str] = padding_value
SCREAMING_SNAKE_CASE : Tuple = sampling_rate
SCREAMING_SNAKE_CASE : Optional[int] = return_attention_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
def __lowerCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Union[str, Any]=False ) -> Tuple:
'''simple docstring'''
def _flatten(lowerCamelCase_ :List[Any] ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Tuple = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Any = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ASTFeatureExtractor
def __lowerCAmelCase ( self :Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ASTFeatureExtractionTester(self )
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE : Tuple = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE : List[str] = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE : List[str] = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
@require_torch
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.rand(1_00 ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE : Tuple = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ) -> list:
        '''simple docstring'''
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
@require_torch
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
        # fmt: off
        SCREAMING_SNAKE_CASE : Any = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : List[str] = ASTFeatureExtractor()
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 10_24, 1_28) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase_ , atol=1E-4 ) )
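# Standalone sketch of the feature extractor under test (assumes `transformers`
# with torch/torchaudio available; the waveform values are arbitrary).
if __name__ == "__main__":
    import numpy as np
    from transformers import ASTFeatureExtractor

    extractor = ASTFeatureExtractor()
    waveform = np.random.rand(16_000).astype(np.float32)  # ~1 s of audio at 16 kHz
    features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
    print(features.input_values.shape)  # (1, 1024, 128): padded frames x mel bins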
| 698 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs( u , graph , visited_edge , path=None )-> list:
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs( v , graph , visited_edge , path )
    return path
def check_circuit_or_path( graph , max_node )-> tuple:
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node ):
        if i not in graph.keys():
            continue
        if len(graph[i] ) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler( graph , max_node )-> None:
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
    check, odd_node = check_circuit_or_path( graph , max_node )
    if check == 3:
        print('''graph is not Eulerian''' )
        print('''no path''' )
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('''graph has a Euler path''' )
    if check == 1:
        print('''graph has a Euler cycle''' )
    path = dfs( start_node , graph , visited_edge )
    print(path )
def main( )-> None:
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler( g1 , max_node )
    check_euler( g2 , max_node )
    check_euler( g3 , max_node )
    check_euler( g4 , max_node )
    check_euler( g5 , max_node )
if __name__ == "__main__":
main()
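    # Quick self-check (illustrative): a triangle has no odd-degree vertices,
    # so the helper reports an Euler cycle (return code 1).
    assert check_circuit_or_path({1: [2, 3], 2: [1, 3], 3: [1, 2]} , 4 )[0] == 1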
| 698 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """swinv2"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :str , lowerCamelCase_ :str=2_24 , lowerCamelCase_ :Any=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :str=96 , lowerCamelCase_ :Optional[Any]=[2, 2, 6, 2] , lowerCamelCase_ :str=[3, 6, 12, 24] , lowerCamelCase_ :Optional[Any]=7 , lowerCamelCase_ :List[str]=4.0 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Optional[Any]="gelu" , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Tuple=1E-5 , lowerCamelCase_ :Any=32 , **lowerCamelCase_ :List[str] , ) -> int:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : int = embed_dim
SCREAMING_SNAKE_CASE : Union[str, Any] = depths
SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = num_heads
SCREAMING_SNAKE_CASE : List[Any] = window_size
SCREAMING_SNAKE_CASE : Dict = mlp_ratio
SCREAMING_SNAKE_CASE : Optional[Any] = qkv_bias
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : int = (0, 0, 0, 0)
| 698 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MBartaaTokenizer
UpperCamelCase = MBartaaTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE : Optional[int] = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''<s>'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
SCREAMING_SNAKE_CASE : Dict = 1
return cls
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : int = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
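# End-to-end sketch of the tokenizer exercised above (assumes Hub access; the
# checkpoint and language codes come from the test class).
if __name__ == "__main__":
    from transformers import MBart50TokenizerFast

    tok = MBart50TokenizerFast.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    ids = tok("UN Chief Says There Is No Military Solution in Syria").input_ids
    print(ids[0])   # 250004 - the en_XX language code is prefixed
    print(ids[-1])  # 2 - </s> closes the sequence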
| 698 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """MobileNetV1Config"""
# Base docstring
SCREAMING_SNAKE_CASE__ : int = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE__ : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ : Tuple = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ : Tuple = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model, config, tf_weights=None ):
"""simple docstring"""
__magic_name__ :str = {}
if isinstance(snake_case, snake_case ):
__magic_name__ :List[str] = model.mobilenet_va
else:
__magic_name__ :Union[str, Any] = model
__magic_name__ :List[Any] = '''MobilenetV1/Conv2d_0/'''
__magic_name__ :Union[str, Any] = backbone.conv_stem.convolution.weight
__magic_name__ :str = backbone.conv_stem.normalization.bias
__magic_name__ :Tuple = backbone.conv_stem.normalization.weight
__magic_name__ :str = backbone.conv_stem.normalization.running_mean
__magic_name__ :List[Any] = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
__magic_name__ :List[str] = i + 1
__magic_name__ :int = i * 2
__magic_name__ :Union[str, Any] = backbone.layer[pt_index]
__magic_name__ :Any = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
__magic_name__ :Optional[int] = pointer.convolution.weight
__magic_name__ :Union[str, Any] = pointer.normalization.bias
__magic_name__ :Dict = pointer.normalization.weight
__magic_name__ :Dict = pointer.normalization.running_mean
__magic_name__ :List[str] = pointer.normalization.running_var
__magic_name__ :Tuple = backbone.layer[pt_index + 1]
__magic_name__ :Any = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
__magic_name__ :Tuple = pointer.convolution.weight
__magic_name__ :int = pointer.normalization.bias
__magic_name__ :int = pointer.normalization.weight
__magic_name__ :Dict = pointer.normalization.running_mean
__magic_name__ :int = pointer.normalization.running_var
if isinstance(snake_case, snake_case ):
__magic_name__ :Optional[int] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
__magic_name__ :Optional[int] = model.classifier.weight
__magic_name__ :List[Any] = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va( model, config, tf_checkpoint_path ):
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
__magic_name__ :Optional[Any] = tf.train.list_variables(snake_case )
__magic_name__ :Optional[Any] = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
__magic_name__ :Optional[Any] = tf.train.load_variable(snake_case, snake_case )
__magic_name__ :str = array
# Build TF to PyTorch weights loading map
__magic_name__ :Any = _build_tf_to_pytorch_map(snake_case, snake_case, snake_case )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
__magic_name__ :List[Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
__magic_name__ :Any = np.transpose(snake_case, (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
__magic_name__ :Dict = array.squeeze().transpose()
else:
__magic_name__ :Optional[Any] = np.transpose(snake_case, (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
__magic_name__ :List[str] = torch.from_numpy(snake_case )
tf_weights.pop(snake_case, snake_case )
tf_weights.pop(name + '''/RMSProp''', snake_case )
tf_weights.pop(name + '''/RMSProp_1''', snake_case )
tf_weights.pop(name + '''/ExponentialMovingAverage''', snake_case )
logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
def apply_tf_padding( features, conv_layer ):
    """simple docstring"""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , '''constant''' , 0.0 )
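# Worked example of the "SAME" rule above: a 7x7 input with kernel 3, stride 2
# needs pad_along = max(3 - (7 % 2), 0) = 2 per axis (split 1/1), so the padded
# 9x9 tensor convolves down to ceil(7 / 2) = 4x4, matching TensorFlow.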
class MobileNetVaConvLayer(nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = 1 , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
"""simple docstring"""
super().__init__()
__magic_name__ :List[str] = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
__magic_name__ :Union[str, Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__magic_name__ :Union[str, Any] = nn.Convad(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=__lowerCAmelCase , groups=__lowerCAmelCase , bias=__lowerCAmelCase , padding_mode='''zeros''' , )
if use_normalization:
__magic_name__ :Tuple = nn.BatchNormad(
num_features=__lowerCAmelCase , eps=config.layer_norm_eps , momentum=0.9997 , affine=__lowerCAmelCase , track_running_stats=__lowerCAmelCase , )
else:
__magic_name__ :Optional[Any] = None
if use_activation:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __lowerCAmelCase ):
__magic_name__ :Any = ACTaFN[config.hidden_act]
else:
__magic_name__ :Union[str, Any] = config.hidden_act
else:
__magic_name__ :List[Any] = None
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.config.tf_padding:
__magic_name__ :Optional[Any] = apply_tf_padding(__lowerCAmelCase , self.convolution )
__magic_name__ :str = self.convolution(__lowerCAmelCase )
if self.normalization is not None:
__magic_name__ :Optional[int] = self.normalization(__lowerCAmelCase )
if self.activation is not None:
__magic_name__ :int = self.activation(__lowerCAmelCase )
return features
class MobileNetVaPreTrainedModel(PreTrainedModel ):
a__ = MobileNetVaConfig
a__ = load_tf_weights_in_mobilenet_va
a__ = '''mobilenet_v1'''
a__ = '''pixel_values'''
a__ = False
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowerCAmelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE__ : str = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ : str = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , lowerCamelCase , )
class MobileNetVaModel(MobileNetVaPreTrainedModel ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = True ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = config
__magic_name__ :Optional[Any] = 3_2
__magic_name__ :List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
__magic_name__ :List[Any] = MobileNetVaConvLayer(
__lowerCAmelCase , in_channels=config.num_channels , out_channels=__lowerCAmelCase , kernel_size=3 , stride=2 , )
__magic_name__ :Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__magic_name__ :Dict = nn.ModuleList()
for i in range(1_3 ):
__magic_name__ :Optional[int] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__magic_name__ :Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=3 , stride=strides[i] , groups=__lowerCAmelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
__lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=1 , ) )
__magic_name__ :Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__ :Dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
__magic_name__ :str = self.conv_stem(__lowerCAmelCase )
__magic_name__ :Optional[int] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__magic_name__ :int = layer_module(__lowerCAmelCase )
if output_hidden_states:
__magic_name__ :List[str] = all_hidden_states + (hidden_states,)
__magic_name__ :Tuple = hidden_states
if self.pooler is not None:
__magic_name__ :Optional[int] = torch.flatten(self.pooler(__lowerCAmelCase ) , start_dim=1 )
else:
__magic_name__ :Optional[int] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=__lowerCAmelCase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCamelCase , )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
__magic_name__ :Optional[Any] = config.num_labels
__magic_name__ :List[str] = MobileNetVaModel(__lowerCAmelCase )
__magic_name__ :Dict = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__magic_name__ :int = nn.Dropout(config.classifier_dropout_prob , inplace=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = nn.Linear(__lowerCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__ :List[str] = self.mobilenet_va(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
__magic_name__ :str = outputs.pooler_output if return_dict else outputs[1]
__magic_name__ :Any = self.classifier(self.dropout(__lowerCAmelCase ) )
__magic_name__ :Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__magic_name__ :List[str] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__magic_name__ :Union[str, Any] = '''single_label_classification'''
else:
__magic_name__ :Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
__magic_name__ :int = MSELoss()
if self.num_labels == 1:
__magic_name__ :Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__magic_name__ :str = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
__magic_name__ :Optional[Any] = CrossEntropyLoss()
__magic_name__ :int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__magic_name__ :Optional[Any] = BCEWithLogitsLoss()
__magic_name__ :Union[str, Any] = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
__magic_name__ :Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states , )
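# Usage sketch of the public classes this module mirrors (an illustration:
# assumes `transformers`, Pillow and Hub access; "cat.jpg" is a hypothetical file).
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
    pred = model(**inputs).logits.argmax(-1).item()
    print(model.config.id2label[pred])  # e.g. "tabby, tabby cat"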
| 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest( TestCase ):
'''simple docstring'''
    def _create_example_records( self ) -> list:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._create_example_records()
SCREAMING_SNAKE_CASE : List[Any] = Dataset.from_list(lowerCamelCase_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(lowerCamelCase_ ):
self.assertDictEqual(lowerCamelCase_ , example_records[i] )
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._create_example_records()
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def __lowerCAmelCase ( self :List[str] ) -> Dict: # checks what happens with missing columns
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE : List[str] = Dataset.from_list(lowerCamelCase_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
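# Standalone sketch of the behaviour asserted above (assumes the `datasets`
# package; the first record fixes the schema for the rest).
if __name__ == "__main__":
    ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(ds[0])  # {'col_1': 1}
    print(ds[1])  # {'col_1': None} - col_2 is dropped, schema comes from record 0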
| 698 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage: float , current: float , power: float ) -> tuple:
    """simple docstring"""
    result = namedtuple('result' , 'name value' )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError('Only one argument must be 0' )
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system' )
    elif voltage == 0:
        return result('voltage' , power / current )
    elif current == 0:
        return result('current' , power / voltage )
    elif power == 0:
        return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
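    # Worked example: the argument passed as 0 is the unknown being solved for.
    print(electric_power(voltage=0 , current=2 , power=5 ) )
    # -> result(name='voltage', value=2.5)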
| 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 1_00 , )-> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f( x: float )-> float:
        '''simple docstring'''
        return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 698 | 0 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
ZERO2 = """zero2"""
ZERO3 = """zero3"""
stages = [ZERO2, ZERO3]
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :Dict , _snake_case :Union[str, Any] ) -> Tuple:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_A = parameterized.to_safe_name('''_'''.join(str(_snake_case ) for x in param.args ) )
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
UpperCAmelCase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCamelCase__ ( _A):
"""simple docstring"""
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Dict:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ) -> Dict:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> str:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(__lowerCAmelCase , name_func=__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
self.run_and_check(
stage=__lowerCAmelCase , model=__lowerCAmelCase , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ) -> int:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int = 10 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , ) -> Any:
_A = models[model]
_A = self.run_trainer(
stage=__lowerCAmelCase , model_name=__lowerCAmelCase , eval_steps=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , fpaa=__lowerCAmelCase , )
self.do_checks(__lowerCAmelCase )
return output_dir
def snake_case_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int = 10 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , ) -> Any:
_A = self.get_auto_remove_tmp_dir('''./xxx''' , after=__lowerCAmelCase )
_A = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_A = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_A = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_A = self.get_launcher(__lowerCAmelCase )
_A = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
return output_dir
def snake_case_ ( self : List[str] , __lowerCAmelCase : Dict=False ) -> Tuple:
# 1. explicitly set --num_nodes=1 just in case these tests end up running on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
_A = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
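# --- Illustrative sketch (not part of the original test file) ----------------
# Shows the naming pattern the comment at the top of this file describes: with
# a custom ``name_func`` every element of the (stage, model) Cartesian product
# lands in the generated sub-test name. The params below are made up;
# ``parameterized.to_safe_name`` and the ``name_func(func, param_num, param)``
# hook are real `parameterized` APIs.
import itertools
import unittest
from parameterized import parameterized
def _demo_name_func(func, param_num, param):
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
_demo_params = list(itertools.product(["zero2", "zero3"], ["base", "large"]))
class _DemoNaming(unittest.TestCase):
    @parameterized.expand(_demo_params, name_func=_demo_name_func)
    def test_stage_model(self, stage, model):
        # collected as e.g. test_stage_model_zero2_base, test_stage_model_zero3_large
        self.assertIn(stage, ("zero2", "zero3"))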
| 2 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __A ( a_ : int=None )-> Tuple:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE : List[str] = subparsers.add_parser('''test''' )
else:
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=a_ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=a_ )
return parser
def __A ( a_ : Any )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
SCREAMING_SNAKE_CASE : Tuple = script_name
else:
SCREAMING_SNAKE_CASE : Optional[Any] = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE : str = ['''accelerate-launch'''] + test_args.split()
SCREAMING_SNAKE_CASE : List[str] = execute_subprocess_async(a_ , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __A ( )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = test_command_parser()
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
test_command(a_ )
if __name__ == "__main__":
main()
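# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal, stdlib-only restatement of the parser wiring above: the same
# function can register itself as an ``accelerate test`` subcommand or act as
# a standalone parser. All ``_demo_*`` names are hypothetical.
def _demo_test_command_parser(subparsers=None):
    if subparsers is not None:
        demo_parser = subparsers.add_parser("test")
    else:
        demo_parser = argparse.ArgumentParser("Accelerate test command")
    demo_parser.add_argument("--config_file", default=None)
    return demo_parser
def _demo_parse():
    root = argparse.ArgumentParser("accelerate")
    _demo_test_command_parser(root.add_subparsers(dest="command"))
    return root.parse_args(["test", "--config_file", "cfg.yaml"])
# _demo_parse().config_file == "cfg.yaml"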
| 698 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , unittest.TestCase):
lowerCAmelCase_ = IFImgaImgSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""})
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def UpperCAmelCase_ ( self , A_ , A_=0 )-> int:
'''simple docstring'''
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
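# --- Illustrative sketch (not part of the original test file) ----------------
# ``get_dummy_inputs`` above pins randomness twice: ``torch.Generator`` for the
# pipeline and ``random.Random`` for the dummy tensors, so repeated runs are
# bit-identical. A standalone check of the generator half of that pattern:
def _demo_seeded_generator_is_reproducible(seed: int = 0) -> bool:
    first = torch.randn(2, 2, generator=torch.Generator().manual_seed(seed))
    second = torch.randn(2, 2, generator=torch.Generator().manual_seed(seed))
    return torch.equal(first, second)
# _demo_seeded_generator_is_reproducible() -> True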
| 3 |
"""simple docstring"""
def __A ( a_ : int = 10 , a_ : int = 10_00 , a_ : bool = True )-> int:
'''simple docstring'''
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and isinstance(a_ , a_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''min_val must be less than or equal to max_val''' )
return min_val if option else max_val
def __A ( a_ : int , a_ : int )-> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def __A ( a_ : int , a_ : int , a_ : int )-> None:
'''simple docstring'''
assert (
isinstance(a_ , a_ ) and isinstance(a_ , a_ ) and isinstance(a_ , a_ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument values must satisfy lower < higher''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(a_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = lower
SCREAMING_SNAKE_CASE : int = higher
SCREAMING_SNAKE_CASE : List[str] = []
while True:
SCREAMING_SNAKE_CASE : Any = get_avg(a_ , a_ )
last_numbers.append(a_ )
if answer(a_ ) == "low":
SCREAMING_SNAKE_CASE : Dict = number
elif answer(a_ ) == "high":
SCREAMING_SNAKE_CASE : Tuple = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def __A ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = int(input('''Enter lower value : ''' ).strip() )
SCREAMING_SNAKE_CASE : Tuple = int(input('''Enter high value : ''' ).strip() )
SCREAMING_SNAKE_CASE : List[str] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(a_ , a_ , a_ )
if __name__ == "__main__":
main()
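# --- Illustrative sketch (not part of the original file) ---------------------
# The loop above is a plain bisection: each ``get_avg`` call halves the open
# interval (lower, higher), so it needs O(log2(higher - lower)) probes. A
# non-interactive restatement of the same search, with made-up names:
def _demo_bisect_guess(lower: int, higher: int, to_guess: int) -> list:
    probes = []
    while True:
        number = int((lower + higher) / 2)
        probes.append(number)
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return probes
# _demo_bisect_guess(0, 100, 37) -> [50, 25, 37]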
| 698 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class a ( a__ ):
snake_case__ = '''luke'''
def __init__( self , _snake_case=5_02_67 , _snake_case=50_00_00 , _snake_case=7_68 , _snake_case=2_56 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=True , _snake_case=None , _snake_case=1 , _snake_case=0 , _snake_case=2 , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = entity_vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = entity_emb_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_entity_aware_attention
lowerCAmelCase = classifier_dropout
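# --- Illustrative usage (not part of the original file) ----------------------
# The class above is a plain attribute container: each constructor argument is
# stored as an attribute and ``super().__init__`` only consumes the special
# token ids. Against the upstream ``LukeConfig`` signature (keyword names are
# lost in this obfuscated copy), a round trip would look like:
#
#     config = LukeConfig(entity_emb_size=128, num_hidden_layers=6)
#     assert config.entity_emb_size == 128
#     assert config.hidden_size == 768          # untouched default
#     assert config.use_entity_aware_attention  # defaults to True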
| 4 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowerCamelCase__ : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __A ( a_ : Optional[int] , a_ : str , a_ : str , a_ : str , a_ : List[str] )-> Tuple:
'''simple docstring'''
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE : Any = getattr(a_ , a_ )
if weight_type is not None:
SCREAMING_SNAKE_CASE : Optional[int] = getattr(a_ , a_ ).shape
else:
SCREAMING_SNAKE_CASE : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Optional[int] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __A ( a_ : Optional[Any] , a_ : Dict )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Optional[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = hf_model.feature_extractor
SCREAMING_SNAKE_CASE : Tuple = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : int = False
if "conv_layers" in name:
load_conv_layer(
a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE : List[str] = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Dict = name.split(a_ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE : Optional[int] = mapped_key.replace('''*''' , a_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE : List[str] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = '''bias'''
elif "weight" in name:
SCREAMING_SNAKE_CASE : Tuple = '''weight'''
else:
SCREAMING_SNAKE_CASE : str = None
set_recursively(a_ , a_ , a_ , a_ , a_ )
continue
if not is_used:
unused_weights.append(a_ )
logger.warning(F"Unused weights: {unused_weights}" )
def __A ( a_ : Dict , a_ : int , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE : List[str] = name.split('''.''' )
SCREAMING_SNAKE_CASE : Dict = int(items[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[int] , a_ : Optional[int] , a_ : Any , a_ : Any )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split('''adaptor.''' )[-1]
SCREAMING_SNAKE_CASE : List[Any] = name.split('''.''' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
else:
SCREAMING_SNAKE_CASE : str = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : int = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a_ , a_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[Any] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = emb.weight.shape
SCREAMING_SNAKE_CASE : Any = nn.Linear(a_ , a_ , bias=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = emb.weight.data
return lin_layer
@torch.no_grad()
def __A ( a_ : Tuple , a_ : Optional[int] , a_ : List[Any] , a_ : Any , a_ : Tuple , a_ : int , a_ : Any , a_ : str , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] , )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = WavaVecaConfig.from_pretrained(
a_ , add_adapter=a_ , adapter_stride=a_ , adapter_kernel_size=a_ , use_auth_token=a_ , output_hidden_size=a_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = MBartConfig.from_pretrained(a_ )
# load model
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
SCREAMING_SNAKE_CASE : int = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(a_ , use_auth_token=a_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE : str = WavaVecaModel(a_ )
recursively_load_weights_wavaveca(model.encoder , a_ )
# load decoder weights
SCREAMING_SNAKE_CASE : Dict = MBartForCausalLM(a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a_ )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechEncoderDecoderModel(encoder=a_ , decoder=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer(a_ )
tokenizer.save_pretrained(a_ )
SCREAMING_SNAKE_CASE : Tuple = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE : Any = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : List[str] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = '''mbart50'''
SCREAMING_SNAKE_CASE : Optional[int] = '''wav2vec2'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : List[str] = 25_00_04
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Any = SpeechEncoderDecoderConfig.from_dict(a_ )
hf_wavavec.save_pretrained(a_ )
feature_extractor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
lowerCamelCase__ : Dict = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
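# --- Illustrative invocation (not part of the original file) -----------------
# A hypothetical command line for the converter above; the script name and
# every path are placeholders, while the flag names are exactly the ones
# registered by the parser:
#
#   python convert_wav2vec2_seq2seq.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./converted-model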
| 698 | 0 |
'''simple docstring'''
from math import factorial
_lowercase = {str(d): factorial(d) for d in range(10)}
def A (__lowerCamelCase :int ):
return sum(DIGIT_FACTORIAL[d] for d in str(__lowerCamelCase ) )
def A ():
_lowerCAmelCase = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __lowerCamelCase ) if sum_of_digit_factorial(__lowerCamelCase ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
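# --- Illustrative sketch (not part of the original file) ---------------------
# ``_LazyModule`` defers submodule imports until first attribute access. A
# deliberately tiny stand-in (hypothetical, stdlib only) showing the mechanism
# the import-structure mapping above feeds:
import importlib
class _DemoLazyModule:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        # imported only now, on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self._package)
        return getattr(module, attr)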
| 698 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCamelCase_ :
def __init__( self :int , __A :Optional[Any] , __A :Any=13 , __A :Optional[int]=7 , __A :str=True , __A :Tuple=True , __A :Union[str, Any]=True , __A :int=True , __A :Any=99 , __A :Union[str, Any]=64 , __A :int=32 , __A :Tuple=5 , __A :int=4 , __A :Tuple=37 , __A :Optional[Any]="gelu" , __A :Union[str, Any]=0.1 , __A :Any=0.1 , __A :str=512 , __A :Tuple=16 , __A :Optional[Any]=2 , __A :Tuple=0.0_2 , __A :int=3 , __A :Union[str, Any]=4 , __A :List[Any]=None , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = embedding_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def _snake_case ( self :Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self :int ) -> Union[str, Any]:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def _snake_case ( self :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Union[str, Any] , __A :int , __A :str , __A :List[Any] , __A :List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MegatronBertModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , token_type_ids=__A )
SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A )
SCREAMING_SNAKE_CASE__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self :List[Any] , __A :List[str] , __A :Optional[Any] , __A :Optional[int] , __A :List[str] , __A :List[Any] , __A :int , __A :Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MegatronBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self :List[Any] , __A :Any , __A :Tuple , __A :str , __A :str , __A :List[str] , __A :Union[str, Any] , __A :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MegatronBertForCausalLM(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self :Tuple , __A :int , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :int , __A :Union[str, Any] , __A :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MegatronBertForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self :Union[str, Any] , __A :str , __A :Any , __A :Union[str, Any] , __A :List[Any] , __A :Any , __A :str , __A :Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MegatronBertForPreTraining(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self :Optional[Any] , __A :int , __A :Optional[Any] , __A :List[Any] , __A :Tuple , __A :Any , __A :Union[str, Any] , __A :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MegatronBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self :str , __A :int , __A :List[Any] , __A :Tuple , __A :Optional[Any] , __A :Optional[int] , __A :Dict , __A :Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = MegatronBertForSequenceClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self :Optional[Any] , __A :List[Any] , __A :List[Any] , __A :Any , __A :Tuple , __A :Any , __A :Optional[int] , __A :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = MegatronBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self :Optional[Any] , __A :int , __A :List[Any] , __A :List[str] , __A :Any , __A :List[Any] , __A :Any , __A :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = MegatronBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = True
# test_resize_embeddings = False
lowerCamelCase_ = False
def _snake_case ( self :int , __A :Tuple , __A :List[str] , __A :int=False ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
SCREAMING_SNAKE_CASE__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , hidden_size=37 )
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__A )
def _snake_case ( self :List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__A )
def _snake_case ( self :Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__A )
def _snake_case ( self :Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__A )
def _snake_case ( self :Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__A )
def _snake_case ( self :List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__A )
def _snake_case ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__A )
def _snake_case ( self :Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__A )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
return torch.tensor(
UpperCamelCase__ , dtype=torch.long , device=UpperCamelCase__ , )
_lowerCamelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
def _snake_case ( self :int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE__ = os.path.join(os.environ["""MYDIR"""] , __A )
SCREAMING_SNAKE_CASE__ = MegatronBertModel.from_pretrained(__A )
model.to(__A )
model.half()
SCREAMING_SNAKE_CASE__ = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__A )[0]
SCREAMING_SNAKE_CASE__ = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __A )
SCREAMING_SNAKE_CASE__ = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE__ = output[0, ii, jj]
SCREAMING_SNAKE_CASE__ = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE__ = """ii={} jj={} a={} b={}""".format(__A , __A , __A , __A )
self.assertTrue(math.isclose(__A , __A , rel_tol=__A , abs_tol=__A ) , msg=__A )
| 6 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :Optional[int]="auto" , lowerCamelCase_ :Dict=-1 , lowerCamelCase_ :str=0.9 , lowerCamelCase_ :str=5 , lowerCamelCase_ :Tuple=5_00 , lowerCamelCase_ :str="gpt2-large" , lowerCamelCase_ :List[Any]=-1 , lowerCamelCase_ :Dict=10_24 , lowerCamelCase_ :Tuple=25 , lowerCamelCase_ :List[Any]=5 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=25 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = compute_mauve(
p_text=lowerCamelCase_ , q_text=lowerCamelCase_ , p_features=lowerCamelCase_ , q_features=lowerCamelCase_ , p_tokens=lowerCamelCase_ , q_tokens=lowerCamelCase_ , num_buckets=lowerCamelCase_ , pca_max_data=lowerCamelCase_ , kmeans_explained_var=lowerCamelCase_ , kmeans_num_redo=lowerCamelCase_ , kmeans_max_iter=lowerCamelCase_ , featurize_model_name=lowerCamelCase_ , device_id=lowerCamelCase_ , max_text_length=lowerCamelCase_ , divergence_curve_discretization_size=lowerCamelCase_ , mauve_scaling_factor=lowerCamelCase_ , verbose=lowerCamelCase_ , seed=lowerCamelCase_ , )
return out
| 698 | 0 |
"""simple docstring"""
a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def _snake_case ( _snake_case : int ) -> int:
'''simple docstring'''
_A = 0
while number:
# Speed is increased slightly by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Two chains exist: one ends at 89 and the other ends at 1 (the latter
# contains only the single element 1). Declaring chain member 58 first
# minimizes the number of iterations needed to classify all remaining
# members, so 58 and 1 are seeded at the start.
# Changed dictionary to an array to quicken the solution
a = [None] * 10_000_000
a = True
a = False
def _snake_case ( _snake_case : int ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_A = chain(next_number(_snake_case ) )
_A = number_chain
while number < 10_00_00_00:
_A = number_chain
number *= 10
return number_chain
def _snake_case ( _snake_case : int = 10_00_00_00 ) -> int:
'''simple docstring'''
for i in range(1 , _snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
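# --- Illustrative check (not part of the original file) ----------------------
# A self-contained restatement of the digit-square step driving the chains
# above. Every chain ends at 1 or at 89, e.g. 44 -> 32 -> 13 -> 10 -> 1 and
# 85 -> 89:
def _demo_square_digit_step(number: int) -> int:
    return sum(int(d) ** 2 for d in str(number))
# _demo_square_digit_step(44) == 32, then 13, 10, 1
# _demo_square_digit_step(85) == 89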
| 7 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : str = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Optional[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase__ : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
lowerCamelCase__ : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ : int = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class lowercase__:
'''simple docstring'''
def __call__( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = None , **lowerCamelCase_ :Tuple , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : List[str] = titles if texts is None else texts
return super().__call__(
lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [titles]
SCREAMING_SNAKE_CASE : Dict = texts if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [texts]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = questions if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [questions] * n_passages
assert len(lowerCamelCase_ ) == len(
lowerCamelCase_ ), f"There should be as many titles than texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts."
SCREAMING_SNAKE_CASE : Any = super().__call__(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : Dict = super().__call__(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase_ , lowerCamelCase_ )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : int = attention_mask
return self.pad(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DPRReaderTokenizer
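# Illustrative, self-contained sketch of the span-selection logic implemented by
# `_get_best_spans` above: score every (start, end) pair, sort by score, and keep
# the best spans that are not nested inside an already chosen one. The toy
# logits below are made-up values, used only to exercise the algorithm.
def demo_best_spans():
    start_logits = [0.1, 2.0, 0.3, 0.0]
    end_logits = [0.0, 0.5, 1.5, 0.2]
    max_answer_length, top_spans = 3, 2
    scores = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        # skip spans nested inside (or containing) an already selected span
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen  # -> [(1, 2), (0, 1)] for these toy logits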
| 698 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mega'''] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
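# Illustrative sketch of the deferred-import idea behind `_LazyModule`: a
# module-level __getattr__ (PEP 562) resolves names only on first access.
# The mapping below uses `json` purely as a stand-in for a heavy optional
# dependency.
import importlib

_LAZY_ATTRS = {"loads": "json", "dumps": "json"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")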
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
 | 8 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """markuplm"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=2_56 , max_xpath_subs_unit_embeddings=10_24 , tag_pad_id=2_16 , subs_pad_id=10_01 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
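# Minimal usage sketch for the config above; the keyword overrides are
# illustrative values only, and `to_dict()` is inherited from PretrainedConfig.
config = MarkupLMConfig(max_depth=60, xpath_unit_hidden_size=64)
assert config.to_dict()["max_depth"] == 60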
| 698 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ) -> np.ndarray:
    """Pairwise squared Euclidean distances between the rows of `a` and of `b`."""
    b = b.T
    a2 = np.sum(np.square(a ) , axis=1 )
    b2 = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize( x , clusters ) -> np.ndarray:
    """Map each RGB pixel in `x` to the index of its nearest cluster centre."""
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class ImageGPTImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_normalize : bool = True , do_color_quantize : bool = True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 2_56, 'width': 2_56}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , ):
        """simple docstring"""
        # rescale to [-1, 1]
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_normalize : bool = None , do_color_quantize : Optional[bool] = None , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
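# Quick self-check of the colour-quantization helpers above (numpy is already
# imported at the top of this file): every RGB pixel is mapped to the index of
# its nearest cluster centre. The random "palette" is purely illustrative.
def demo_color_quantize():
    rng = np.random.default_rng(0)
    pixels = rng.random((4, 4, 3))          # a tiny 4x4 RGB image
    clusters = rng.random((8, 3))           # 8 made-up palette colours
    ids = color_quantize(pixels, clusters)  # shape (16,), values in [0, 8)
    assert ids.shape == (16,) and ids.max() < 8
    return ids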
| 9 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = """resnet"""
    layer_types = ["""basic""", """bottleneck"""]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1E-3
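# Sketch of how the two classes above fit together: the ONNX config is built
# from a model config and exposes the dynamic input axes. The `out_features`
# override is an illustrative value.
config = ResNetConfig(out_features=["stage2", "stage4"])
onnx_config = ResNetOnnxConfig(config)
assert "pixel_values" in onnx_config.inputs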
| 698 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )}, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}, )
    line_by_line: bool = field(
        default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."} )
    mlm_probability: float = field(
        default=0.1_5, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    plm_probability: float = field(
        default=1 / 6, metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
    block_size: int = field(
        default=-1, metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def get_dataset( args , tokenizer , evaluate = False , cache_dir = None , ):
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
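# Illustrative invocation of this script, using only flags defined by the
# dataclasses above plus standard TrainingArguments; the paths and model name
# are placeholders:
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file ./train.txt \
#       --eval_data_file ./eval.txt \
#       --do_train --do_eval \
#       --output_dir ./output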
| 10 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """mra"""
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 698 | 0 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted):
    """simple docstring"""
    for i in range(len(unsorted) - 1 , 0 , -1):
        swapped = False
        for j in range(i , 0 , -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item) for item in user_input.split(",")]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=12_81_12 , max_position_embeddings=10_24 , encoder_layers=12 , encoder_ffn_dim=40_96 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=40_96 , decoder_attention_heads=16 , encoder_layerdrop=0.0_5 , decoder_layerdrop=0.0_5 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=10_24 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=1_28 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.0_0_1 , router_aux_loss_coef=0.0_0_1 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
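# The config validates `router_dtype` eagerly, so a bad value fails fast.
# A quick sketch of that behaviour:
try:
    NllbMoeConfig(router_dtype="float64")
except ValueError as err:
    print(err)  # `router_dtype` must be one of 'float32', 'float16' or 'bfloat16', ...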
| 698 | 0 |
def mf_knapsack( i , wt , val , j ):
    '''simple docstring'''
    global f # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack( w , wt , val , n ):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution( w , wt , val ):
    '''simple docstring'''
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            F'But got {num_items} weights and {len(val )} values'
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                F'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w)) # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 12 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
    def layers( self ) -> Dict[str, Any]:
        '''simple docstring'''
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ) -> None:
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> None:
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def text2img_sd1_1( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_2( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_3( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_4( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        '''simple docstring'''
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
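# Rough usage sketch. Loading through `custom_pipeline` is one plausible way to
# run a community pipeline like this one (the checkpoint id reuses the v1-4
# constant above); kept commented out because it downloads four checkpoints.
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe(prompt="an astronaut riding a horse").images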
| 698 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__ ) # pylint: disable=invalid-name
def try_infer_format_from_ext( path : str ):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def run_command_factory( args ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand(BaseTransformersCLICommand ):
    """simple docstring"""
    def __init__( self , nlp , reader ) -> None:
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand( parser ):
        run_parser = parser.add_parser('run' , help='Run a pipeline through the CLI' )
        run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
        run_parser.add_argument('--input' , type=str , help='Path to the file to use for inference' )
        run_parser.add_argument('--output' , type=str , help='Path to the file that will be used post to write results.' )
        run_parser.add_argument('--model' , type=str , help='Name or path to the model to instantiate.' )
        run_parser.add_argument('--config' , type=str , help='Name or path to the model\'s config to instantiate.' )
        run_parser.add_argument(
            '--tokenizer' , type=str , help='Name of the tokenizer to use. (default: same as the model name)' )
        run_parser.add_argument(
            '--column' , type=str , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
        run_parser.add_argument(
            '--format' , type=str , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
        run_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
        else:
            self._reader.save(outputs )
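# Illustrative shell invocation built from the flags registered above; the
# task name and file paths are placeholders:
#
#   transformers-cli run --task text-classification \
#       --input reviews.csv --format csv --column text \
#       --output predictions.json --overwrite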
| 13 |
"""simple docstring"""
def bubble_sort( list_data : list , length : int = 0 ) -> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
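# Quick sanity checks for the recursive bubble sort above.
assert bubble_sort([5, 1, 4, 2]) == [1, 2, 4, 5]
assert bubble_sort([]) == []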
if __name__ == "__main__":
import doctest
doctest.testmod()
| 698 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def get_maskformer_config(model_name: str ):
    """simple docstring"""
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    return config
def create_rename_keys(config ):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
            _a : Optional[int] = in_proj_weight[:dim, :]
            _a : List[Any] = in_proj_bias[:dim]
            _a : Optional[int] = in_proj_weight[dim : dim * 2, :]
            _a : Tuple = in_proj_bias[dim : dim * 2]
            _a : int = in_proj_weight[-dim:, :]
            _a : Optional[int] = in_proj_bias[-dim:]
# fmt: on
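# Hedged illustration (not part of the original script): the slicing above splits
# a fused (3 * dim, dim) in_proj matrix into equal query/key/value thirds. The
# helper below reproduces that split on any tensor; its name is hypothetical and
# it is never called by the conversion flow.
def _split_fused_qkv(in_proj_weight, dim):
    query = in_proj_weight[:dim, :]  # first third -> query projection
    key = in_proj_weight[dim : dim * 2, :]  # middle third -> key projection
    value = in_proj_weight[-dim:, :]  # last third -> value projection
    return query, key, value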
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
        _a : List[Any] = in_proj_bias[:hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
        _a : Any = in_proj_bias[:hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
    _a , _a = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
        help='''Path to the original state dict (.pkl file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :ScoreSdeVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :int , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 20_00 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : List[str] = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : Any = self.unet
SCREAMING_SNAKE_CASE : Dict = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : List[str] = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
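# Minimal usage sketch (hedged: the public class name and checkpoint id below are
# assumptions; any repo pairing a score-based UNet with an SDE-VE scheduler fits):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]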
| 698 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A :
'''simple docstring'''
A__ = None
A__ = False
A__ = False
A__ = False
A__ = None
A__ = None
A__ = False
A__ = False
A__ = False
A__ = True
A__ = None
A__ = 1
A__ = None
A__ = False
A__ = None
A__ = None
def lowerCamelCase__ (self : int ) -> "DownloadConfig":
"""simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
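    # Hedged usage sketch (the field names of this dataclass are elided above, so
    # the attribute below is an assumption):
    #
    #   base = DownloadConfig()
    #   override = base.copy()           # deep copy of every field
    #   override.force_download = True   # does not affect `base`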
| 15 |
"""simple docstring"""
import qiskit
def __A ( a_ : int , a_ : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE : str = qiskit.QuantumCircuit(a_ , a_ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE : int = qiskit.execute(a_ , a_ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(a_ )
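# With X applied to both qubits, the register is deterministically |11>, so on a
# noiseless simulator the returned counts are expected to be {'11': 1000}.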
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 698 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__A : Optional[Any] = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def __a ( A__ : List[str] ):
SCREAMING_SNAKE_CASE = {}
state_dict.pop("pixel_mean" , A__ )
state_dict.pop("pixel_std" , A__ )
SCREAMING_SNAKE_CASE = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE = key.replace(A__ , A__ )
if re.match(A__ , A__ ):
SCREAMING_SNAKE_CASE = int(re.match(A__ , A__ ).group(2 ) )
if layer_nb == 0:
SCREAMING_SNAKE_CASE = key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
SCREAMING_SNAKE_CASE = key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
SCREAMING_SNAKE_CASE = key.replace("layers.2" , "proj_out" )
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
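# Hedged illustration of the hypernetwork-MLP remapping above (the keys below are
# hypothetical but follow the matched regex pattern):
#
#   "mask_decoder.output_hypernetworks_mlps.0.layers.0.weight"  # layer_nb == 0
#       -> "mask_decoder.output_hypernetworks_mlps.0.proj_in.weight"
#   "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"  # layer_nb == 2
#       -> "mask_decoder.output_hypernetworks_mlps.0.proj_out.weight"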
def __a ( A__ : Union[str, Any] , A__ : Tuple , A__ : int , A__ : str="ybelkada/segment-anything" ):
SCREAMING_SNAKE_CASE = hf_hub_download(A__ , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
SCREAMING_SNAKE_CASE = SamConfig()
elif "sam_vit_l" in model_name:
SCREAMING_SNAKE_CASE = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
SCREAMING_SNAKE_CASE = SamConfig(
vision_config=A__ , )
elif "sam_vit_h" in model_name:
SCREAMING_SNAKE_CASE = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
SCREAMING_SNAKE_CASE = SamConfig(
vision_config=A__ , )
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )
SCREAMING_SNAKE_CASE = replace_keys(A__ )
SCREAMING_SNAKE_CASE = SamImageProcessor()
SCREAMING_SNAKE_CASE = SamProcessor(image_processor=A__ )
SCREAMING_SNAKE_CASE = SamModel(A__ )
hf_model.load_state_dict(A__ )
SCREAMING_SNAKE_CASE = hf_model.to("cuda" )
SCREAMING_SNAKE_CASE = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw ).convert("RGB" )
SCREAMING_SNAKE_CASE = [[[400, 650]]]
SCREAMING_SNAKE_CASE = [[1]]
SCREAMING_SNAKE_CASE = processor(images=np.array(A__ ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE = hf_model(**A__ )
SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
SCREAMING_SNAKE_CASE = processor(
images=np.array(A__ ) , input_points=A__ , input_labels=A__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE = hf_model(**A__ )
SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
SCREAMING_SNAKE_CASE = ((75, 275, 1725, 850),)
SCREAMING_SNAKE_CASE = processor(images=np.array(A__ ) , input_boxes=A__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE = hf_model(**A__ )
SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
SCREAMING_SNAKE_CASE = [[[400, 650], [800, 650]]]
SCREAMING_SNAKE_CASE = [[1, 1]]
SCREAMING_SNAKE_CASE = processor(
images=np.array(A__ ) , input_points=A__ , input_labels=A__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
SCREAMING_SNAKE_CASE = hf_model(**A__ )
SCREAMING_SNAKE_CASE = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
__A : Dict = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
    parser.add_argument(
        '--model_name',
        default='sam_vit_h_4b8939',
        choices=choices,
        type=str,
        help='Name of the SAM checkpoint to convert',
    )
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    parser.add_argument(
        '--model_hub_id',
        default='ybelkada/segment-anything',
        type=str,
        help='Repo id on the Hugging Face Hub that hosts the original SAM checkpoints',
    )
__A : Tuple = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id) | 16 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ : Optional[int] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __A ( a_ : Dict )-> str:
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __A ( a_ : Dict )-> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(a_ )
def __A ( a_ : Union[str, Any] )-> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE : List[str] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(a_ , id=a_ )
def __A ( a_ : Dict , a_ : List[str] )-> Dict:
'''simple docstring'''
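    # pytest exits with status 5 when no tests were collected; remapping it to 0
    # below keeps CI green when a filtered test selection matches nothing.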
if exitstatus == 5:
SCREAMING_SNAKE_CASE : List[str] = 0
# Doctest custom flag to ignore output.
lowerCamelCase__ : Tuple = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase__ : Optional[int] = doctest.OutputChecker
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] ) -> Dict:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : str = CustomOutputChecker
lowerCamelCase__ : Any = HfDoctestModule
lowerCamelCase__ : int = HfDocTestParser
| 698 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[Any] = '''deformable_detr'''
_lowercase : int = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Union[str, Any] , __A : int=True , __A : Any=None , __A : int=3 , __A : Dict=300 , __A : Optional[int]=1024 , __A : Any=6 , __A : Optional[int]=1024 , __A : Optional[int]=8 , __A : Dict=6 , __A : Optional[Any]=1024 , __A : Optional[Any]=8 , __A : str=0.0 , __A : List[Any]=True , __A : Optional[int]="relu" , __A : Any=256 , __A : Union[str, Any]=0.1 , __A : List[str]=0.0 , __A : Union[str, Any]=0.0 , __A : Optional[Any]=0.0_2 , __A : Union[str, Any]=1.0 , __A : str=True , __A : List[str]=False , __A : Tuple="sine" , __A : List[str]="resnet50" , __A : Optional[int]=True , __A : Union[str, Any]=False , __A : str=4 , __A : Optional[Any]=4 , __A : List[Any]=4 , __A : str=False , __A : List[str]=300 , __A : List[Any]=False , __A : Optional[Any]=1 , __A : Union[str, Any]=5 , __A : Dict=2 , __A : Tuple=1 , __A : int=1 , __A : List[Any]=5 , __A : List[Any]=2 , __A : Any=0.1 , __A : Union[str, Any]=0.2_5 , __A : Optional[Any]=False , **__A : Any , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__A : int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
__A : Any = backbone_config.get("""model_type""" )
__A : str = CONFIG_MAPPING[backbone_model_type]
__A : List[str] = config_class.from_dict(__A )
__A : List[Any] = use_timm_backbone
__A : List[Any] = backbone_config
__A : Tuple = num_channels
__A : Union[str, Any] = num_queries
__A : str = max_position_embeddings
__A : Any = d_model
__A : Any = encoder_ffn_dim
__A : Dict = encoder_layers
__A : Optional[Any] = encoder_attention_heads
__A : Tuple = decoder_ffn_dim
__A : Dict = decoder_layers
__A : Optional[Any] = decoder_attention_heads
__A : List[Any] = dropout
__A : int = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Optional[Any] = activation_function
__A : List[str] = init_std
__A : int = init_xavier_std
__A : Dict = encoder_layerdrop
__A : List[Any] = auxiliary_loss
__A : Tuple = position_embedding_type
__A : Dict = backbone
__A : Any = use_pretrained_backbone
__A : List[Any] = dilation
# deformable attributes
__A : Optional[Any] = num_feature_levels
__A : str = encoder_n_points
__A : Any = decoder_n_points
__A : Tuple = two_stage
__A : Any = two_stage_num_proposals
__A : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__A : Tuple = class_cost
__A : Dict = bbox_cost
__A : List[str] = giou_cost
# Loss coefficients
__A : Any = mask_loss_coefficient
__A : Any = dice_loss_coefficient
__A : Optional[Any] = bbox_loss_coefficient
__A : Tuple = giou_loss_coefficient
__A : str = eos_coefficient
__A : Optional[Any] = focal_alpha
__A : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=__A , **__A )
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.encoder_attention_heads
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return self.d_model
def lowerCAmelCase_ ( self : str ):
__A : int = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__A : Optional[Any] = self.backbone_config.to_dict()
__A : List[Any] = self.__class__.model_type
return output
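# Minimal usage sketch (hedged: the names below mirror the public
# DeformableDetrConfig API that this snippet corresponds to):
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.to_dict()["model_type"] == "deformable_detr"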
| 17 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=13 , lowerCamelCase_ :List[str]=7 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :str=99 , lowerCamelCase_ :Optional[Any]=32 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :int=4 , lowerCamelCase_ :Optional[Any]=37 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :int=3 , lowerCamelCase_ :List[Any]=4 , lowerCamelCase_ :Optional[Any]=None , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : str = 13
SCREAMING_SNAKE_CASE : str = 7
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Any = 99
SCREAMING_SNAKE_CASE : Dict = 3_84
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Any = 37
SCREAMING_SNAKE_CASE : List[str] = '''gelu'''
SCREAMING_SNAKE_CASE : List[str] = 0.1
SCREAMING_SNAKE_CASE : int = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = 5_12
SCREAMING_SNAKE_CASE : int = 16
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Tuple = 0.0_2
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
SCREAMING_SNAKE_CASE : str = 1_28
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = 9
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : List[str] = None
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[str] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Dict = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertForMaskedLM(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = TFConvBertForSequenceClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertForMultipleChoice(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForTokenClassification(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFConvBertForQuestionAnswering(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = True
if hasattr(lowerCamelCase_ , '''use_cache''' ):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : str = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , '''saved_model''' , '''1''' )
SCREAMING_SNAKE_CASE : Tuple = tf.keras.models.load_model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[int] = outputs['''encoder_hidden_states''']
SCREAMING_SNAKE_CASE : str = outputs['''encoder_attentions''']
else:
SCREAMING_SNAKE_CASE : List[str] = outputs['''hidden_states''']
SCREAMING_SNAKE_CASE : List[Any] = outputs['''attentions''']
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , '''key_length''' , lowerCamelCase_ )
def check_decoder_attentions_output(lowerCamelCase_ :Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
SCREAMING_SNAKE_CASE : int = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase_ :Optional[int] ):
SCREAMING_SNAKE_CASE : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
SCREAMING_SNAKE_CASE : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 698 | 0 |
'''simple docstring'''
import math
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=0 ) -> str: # a graph with Node 0,1,...,N-1
_lowerCAmelCase = n
_lowerCAmelCase = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # adjacency matrix for weight
_lowerCAmelCase = [
[math.inf for j in range(0 , _lowerCAmelCase )] for i in range(0 , _lowerCAmelCase )
] # dp[i][j] stores minimum distance from i to j
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = w
def _snake_case ( self ) -> Optional[Any]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_lowerCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
return self.dp[u][v]
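# floyd_warshall() relaxes every (i, j) pair through every intermediate vertex k,
# giving O(n^3) time and O(n^2) space; afterwards show_min(u, v) returns the
# shortest u -> v distance, or math.inf if v is unreachable. Note the diagonal
# entries start at math.inf here, so dp[i][i] only becomes finite via a cycle.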
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """bert"""
def __init__( self :Any , lowerCamelCase_ :List[Any]=3_05_22 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :Tuple=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :int=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :int="absolute" , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=None , **lowerCamelCase_ :List[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
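# Hedged usage sketch (the ONNX config above corresponds to the public
# BertOnnxConfig; the task string below is an assumption):
#
#   config = BertConfig.from_pretrained("bert-base-uncased")
#   onnx_config = BertOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)  # dynamic batch/sequence axes per input tensor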
| 698 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_a = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if isinstance(__snake_case, torch.Tensor ):
return image
elif isinstance(__snake_case, PIL.Image.Image ):
_UpperCamelCase = [image]
_UpperCamelCase = [trans(img.convert('''RGB''' ) ) for img in image]
_UpperCamelCase = torch.stack(__snake_case )
return image
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a) -> Dict:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCamelCase = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=__a , scheduler=__a)
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''')
def UpperCAmelCase ( self , __a , __a , __a) -> Dict:
'''simple docstring'''
# get the original timestep using init_timestep
_UpperCamelCase = min(int(num_inference_steps * strength) , __a)
_UpperCamelCase = max(num_inference_steps - init_timestep , 0)
_UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=None) -> int:
'''simple docstring'''
if not isinstance(__a , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__a)}''')
_UpperCamelCase = image.to(device=__a , dtype=__a)
if isinstance(__a , __a) and len(__a) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__a)}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
_UpperCamelCase = init_latents.shape
_UpperCamelCase = randn_tensor(__a , generator=__a , device=__a , dtype=__a)
# get latents
print('''add noise to latents at timestep''' , __a)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , __a)
_UpperCamelCase = init_latents
return latents
@torch.no_grad()
def __call__( self , __a = None , __a = 0.8 , __a = 1 , __a = None , __a = 0.0 , __a = 50 , __a = None , __a = "pil" , __a = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(__a)
# 2. Preprocess image
_UpperCamelCase = preprocess(__a)
# 3. set timesteps
self.scheduler.set_timesteps(__a , device=self.device)
_UpperCamelCase , _UpperCamelCase = self.get_timesteps(__a , __a , self.device)
_UpperCamelCase = timesteps[:1].repeat(__a)
# 4. Prepare latent variables
_UpperCamelCase = self.prepare_latents(__a , __a , __a , self.unet.dtype , self.device , __a)
_UpperCamelCase = latents
# 5. Denoising loop
for t in self.progress_bar(__a):
# 1. predict noise model_output
_UpperCamelCase = self.unet(__a , __a).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(
__a , __a , __a , eta=__a , use_clipped_model_output=__a , generator=__a , ).prev_sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(__a)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__a)
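# Hedged usage sketch (the pipeline class name is obfuscated above; the
# checkpoint id and wiring below are assumptions, since any unconditional DDPM
# UNet plus a DDIM scheduler should fit):
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
#   scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256")
#   pipe = <this pipeline class>(unet=unet, scheduler=scheduler)
#   image, noising_timestep = pipe(init_image, strength=0.8, return_dict=False)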
| 19 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :List[Any] , lowerCamelCase_ :Optional[int]=5_02_67 , lowerCamelCase_ :List[Any]=50_00_00 , lowerCamelCase_ :str=7_68 , lowerCamelCase_ :Optional[Any]=2_56 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :Any=30_72 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int=None , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :str=0 , lowerCamelCase_ :int=2 , **lowerCamelCase_ :List[str] , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = entity_vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Dict = entity_emb_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : str = classifier_dropout
| 698 | 0 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    func_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
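Each line of the corrections file is expected to hold four semicolon-separated fields; a hedged sketch of the input format and invocation (the file and script names below are hypothetical):

# corrections.txt (hypothetical), one entry per line: file;class_name;test_name;correct_line
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_logits;expected_slice = torch.tensor([0.1, 0.2])
#
# python overwrite_expected_slices.py --correct_filename corrections.txt --fail_filename failed_tests.txt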
| 20 |
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
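A quick hedged check of the traversal on a three-node path graph (degrees 1, 2, 1, so exactly two odd vertices):

g = {1: [2], 2: [1, 3], 3: [2]}
check_euler(g, 10)  # prints "graph has a Euler path" and the path [3, 2, 1]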
| 698 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCAmelCase_ : List[str] = "src/transformers"
UpperCAmelCase_ : List[str] = "docs/source/en"
UpperCAmelCase_ : str = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCAmelCase_ : Any = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase_ : Any = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
UpperCAmelCase_ : Tuple = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
UpperCAmelCase_ : Optional[int] = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ : Tuple = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    "Split a camelcased `identifier` into words."
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
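A hedged sketch of what the two formatting helpers return:

camel_case_split("TFBertModel")  # -> ["TF", "Bert", "Model"]
_center_text("PyTorch", 11)      # -> "  PyTorch  " (two spaces of padding on each side)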
| 21 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''<s>'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : int = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
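The class exercised above corresponds to the upstream MBart50Tokenizer; a minimal sketch, assuming access to the Hugging Face Hub:

from transformers import MBart50Tokenizer

tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
ids = tok("UN Chief Says There Is No Military Solution in Syria").input_ids
# ids[0] is the en_XX language code (250004) and ids[-1] is </s> (2)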
| 698 | 0 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self) -> None:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self) -> None:
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self) -> None:
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self) -> None:
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
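The fixture in test_sorted is easy to check by hand: every item has a profit/weight ratio of 5 and the weights sum to 42, which fits under max_weight=100, so the greedy pick takes everything:

profit = [10, 20, 30, 40, 50, 60]
weight = [2, 4, 6, 8, 10, 12]
assert sum(weight) == 42 and sum(profit) == 210  # hence calc_profit(...) == 210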
| 22 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
    def _create_example_records( self :Union[str, Any] ) -> list:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
    def _create_example_dict( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(lowerCamelCase_ )
    def test_create( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._create_example_records()
SCREAMING_SNAKE_CASE : List[Any] = Dataset.from_list(lowerCamelCase_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(lowerCamelCase_ ):
self.assertDictEqual(lowerCamelCase_ , example_records[i] )
    def test_list_dict_equivalent( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._create_example_records()
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records( self :List[str] ) -> Dict: # checks what happens with missing columns
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
    def test_variable_list_records( self :Tuple ) -> Optional[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE : List[str] = Dataset.from_list(lowerCamelCase_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
    def test_create_empty( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
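A minimal sketch of the API exercised above (an installed datasets library is assumed):

from datasets import Dataset

ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
print(ds.column_names)  # ['col_1', 'col_2']
print(ds[0])            # {'col_1': 3, 'col_2': 'a'}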
| 698 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 23 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")

    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
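A sanity check for the approximation: the straight line y = x from 0 to 1 has exact length sqrt(2):

print(line_length(lambda x: x, 0, 1, 100))  # ~1.41421, matching math.sqrt(2)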
| 698 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
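The lazy module defers the heavy torch import until an attribute is first resolved; a usage sketch (parameter names follow the GPT-2-style upstream config):

from transformers import GPTBigCodeConfig  # resolved lazily via _LazyModule

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)  # a tiny config for testing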
| 24 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
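The same entry point is normally reached through the accelerate CLI; a hedged sketch of equivalent invocations:

# accelerate test
# accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml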
| 698 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __UpperCamelCase ( self : Tuple , a : int , a : Any , a : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __UpperCamelCase ( self : int , a : List[str] , a : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , a )
import datasets
SCREAMING_SNAKE_CASE : Tuple = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE : List[Any] = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@slow
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "Intel/dpt-large"
SCREAMING_SNAKE_CASE : Optional[int] = pipeline("depth-estimation" , model=a )
SCREAMING_SNAKE_CASE : str = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
SCREAMING_SNAKE_CASE : Union[str, Any] = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 25 |
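A minimal sketch of the pipeline exercised by the slow test above (model download assumed):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"]            # PIL.Image.Image depth map
outputs["predicted_depth"]  # torch.Tensor of raw model predictions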
"""simple docstring"""
def pick_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    # NOTE: helper name reconstructed; this function is not referenced elsewhere in this snippet.
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
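A worked trace of the halving search above, with guesses computed by get_avg as defined:

guess_the_number(0, 1000, 355)
# guesses: 500, 250, 375, 312, 343, 359, 351, 355 -> prints "guess the number : 355"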
| 698 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def __A ( a_ : Optional[int] , a_ : Optional[int] , a_ : Any , a_ : Any )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split('''adaptor.''' )[-1]
SCREAMING_SNAKE_CASE : List[Any] = name.split('''.''' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
else:
SCREAMING_SNAKE_CASE : str = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : int = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a_ , a_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a_ )
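# Note (hedged): the adapter loader above routes weights purely by name. Keys
# containing "proj_ln"/"proj" map onto the projection and its layer norm, while a
# name such as "adaptor.layers.3.conv.bias" yields items[1] == "3", so
# items[1].isdigit() sets layer_id = 3 and the weight lands in adapter.layers[3].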
def __A ( a_ : Optional[Any] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = emb.weight.shape
SCREAMING_SNAKE_CASE : Any = nn.Linear(a_ , a_ , bias=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = emb.weight.data
return lin_layer
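# Usage sketch for the embedding-to-linear helper above (shapes illustrative, the
# de-obfuscated name `make_linear_from_emb` is an assumption): build a bias-free
# LM head whose weight tensor is shared with an embedding matrix.
#
#   emb = nn.Embedding(250054, 1024)
#   lm_head = make_linear_from_emb(emb)
#   assert lm_head.weight.shape == emb.weight.shape  # storage is shared with emb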
@torch.no_grad()
def __A ( a_ : Tuple , a_ : Optional[int] , a_ : List[Any] , a_ : Any , a_ : Tuple , a_ : int , a_ : Any , a_ : str , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] , )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = WavaVecaConfig.from_pretrained(
a_ , add_adapter=a_ , adapter_stride=a_ , adapter_kernel_size=a_ , use_auth_token=a_ , output_hidden_size=a_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = MBartConfig.from_pretrained(a_ )
# load model
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
SCREAMING_SNAKE_CASE : int = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(a_ , use_auth_token=a_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE : str = WavaVecaModel(a_ )
recursively_load_weights_wavaveca(model.encoder , a_ )
# load decoder weights
SCREAMING_SNAKE_CASE : Dict = MBartForCausalLM(a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a_ )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechEncoderDecoderModel(encoder=a_ , decoder=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer(a_ )
tokenizer.save_pretrained(a_ )
SCREAMING_SNAKE_CASE : Tuple = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE : Any = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : List[str] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = '''mbart50'''
SCREAMING_SNAKE_CASE : Optional[int] = '''wav2vec2'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : List[str] = 25_00_04
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Any = SpeechEncoderDecoderConfig.from_dict(a_ )
hf_wavavec.save_pretrained(a_ )
feature_extractor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
lowerCamelCase__ : Dict = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
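# Hedged CLI example using the flags defined above (the script name and all paths
# are placeholders, not from the source):
#
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-dump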
| 698 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase__ ( self ):
_A, _A = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=snake_case_ , dtype=jnp.bfloataa )
_A, _A = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=snake_case_ , from_pt=snake_case_ , dtype=jnp.bfloataa )
_A = controlnet_params
_A = 'bird'
_A = jax.device_count()
_A = pipe.prepare_text_inputs([prompts] * num_samples )
_A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
_A = pipe.prepare_image_inputs([canny_image] * num_samples )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(snake_case_ , jax.device_count() )
_A = replicate(snake_case_ )
_A = shard(snake_case_ )
_A = shard(snake_case_ )
_A = pipe(
prompt_ids=snake_case_ , image=snake_case_ , params=snake_case_ , prng_seed=snake_case_ , num_inference_steps=50 , jit=snake_case_ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 253:256, 253:256, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self ):
_A, _A = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=snake_case_ , dtype=jnp.bfloataa )
_A, _A = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=snake_case_ , from_pt=snake_case_ , dtype=jnp.bfloataa )
_A = controlnet_params
_A = 'Chef in the kitchen'
_A = jax.device_count()
_A = pipe.prepare_text_inputs([prompts] * num_samples )
_A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
_A = pipe.prepare_image_inputs([pose_image] * num_samples )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(snake_case_ , jax.device_count() )
_A = replicate(snake_case_ )
_A = shard(snake_case_ )
_A = shard(snake_case_ )
_A = pipe(
prompt_ids=snake_case_ , image=snake_case_ , params=snake_case_ , prng_seed=snake_case_ , num_inference_steps=50 , jit=snake_case_ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 253:256, 253:256, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
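# Hedged sketch of the replicate/shard data-parallel pattern both tests exercise
# (shapes illustrative): parameters are copied to every local device while inputs
# gain a leading device axis, so the jitted pipeline can pmap across devices.
#
#   params = replicate(params)           # pytree -> one copy per device
#   prompt_ids = shard(prompt_ids)       # (D * B, L) -> (D, B, L)
#   images = pipe(..., jit=True).images  # (D, B, H, W, C) before the reshape above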
| 27 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
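# A minimal, hypothetical sketch of the lazy-import idea behind `_LazyModule` above
# (not the real implementation): exported symbols resolve their defining submodule
# only on first attribute access.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)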
| 698 | 0 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, **A ):
'''simple docstring'''
super().__init__(**A )
requires_backends(self, 'vision' )
requires_backends(self, 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Tuple = {}
# preprocess args
if "points_per_batch" in kwargs:
SCREAMING_SNAKE_CASE : List[Any] = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
SCREAMING_SNAKE_CASE : str = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
SCREAMING_SNAKE_CASE : Any = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
SCREAMING_SNAKE_CASE : Dict = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
SCREAMING_SNAKE_CASE : Tuple = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
SCREAMING_SNAKE_CASE : Tuple = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
SCREAMING_SNAKE_CASE : Dict = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
SCREAMING_SNAKE_CASE : Optional[int] = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
SCREAMING_SNAKE_CASE : int = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
SCREAMING_SNAKE_CASE : List[str] = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
SCREAMING_SNAKE_CASE : Dict = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self, A, *A, A=None, A=None, **A ):
'''simple docstring'''
return super().__call__(A, *A, num_workers=A, batch_size=A, **A )
def UpperCamelCase_ ( self, A, A=64, A = 0, A = 512 / 1_500, A = 32, A = 1, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = load_image(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.size['longest_edge']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.image_processor.generate_crop_boxes(
A, A, A, A, A, A )
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=A, return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
SCREAMING_SNAKE_CASE : int = self.get_inference_context()
with inference_context():
SCREAMING_SNAKE_CASE : Optional[int] = self._ensure_tensor_on_device(A, device=self.device )
SCREAMING_SNAKE_CASE : int = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
SCREAMING_SNAKE_CASE : str = image_embeddings
SCREAMING_SNAKE_CASE : List[str] = grid_points.shape[1]
SCREAMING_SNAKE_CASE : Dict = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0, A, A ):
SCREAMING_SNAKE_CASE : Optional[int] = grid_points[:, i : i + points_per_batch, :, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = input_labels[:, i : i + points_per_batch]
SCREAMING_SNAKE_CASE : str = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase_ ( self, A, A=0.88, A=0.95, A=0, A=1, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs.pop('input_boxes' )
SCREAMING_SNAKE_CASE : Any = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs.pop('original_sizes' ).tolist()
SCREAMING_SNAKE_CASE : Dict = model_inputs.pop('reshaped_input_sizes' ).tolist()
SCREAMING_SNAKE_CASE : List[str] = self.model(**A )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
SCREAMING_SNAKE_CASE : List[str] = model_outputs['pred_masks']
SCREAMING_SNAKE_CASE : Dict = self.image_processor.post_process_masks(
A, A, A, A, binarize=A )
SCREAMING_SNAKE_CASE : List[str] = model_outputs['iou_scores']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor.filter_masks(
masks[0], iou_scores[0], original_sizes[0], input_boxes[0], A, A, A, A, )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase_ ( self, A, A=False, A=False, A=0.7, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
SCREAMING_SNAKE_CASE : str = torch.cat(A )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.post_process_for_mask_generation(
A, A, A, A )
SCREAMING_SNAKE_CASE : Optional[Any] = defaultdict(A )
for output in model_outputs:
for k, v in output.items():
extra[k].append(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if output_rle_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = rle_mask
if output_bboxes_mask:
SCREAMING_SNAKE_CASE : str = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 28 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :Optional[int]="auto" , lowerCamelCase_ :Dict=-1 , lowerCamelCase_ :str=0.9 , lowerCamelCase_ :str=5 , lowerCamelCase_ :Tuple=5_00 , lowerCamelCase_ :str="gpt2-large" , lowerCamelCase_ :List[Any]=-1 , lowerCamelCase_ :Dict=10_24 , lowerCamelCase_ :Tuple=25 , lowerCamelCase_ :List[Any]=5 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=25 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = compute_mauve(
p_text=lowerCamelCase_ , q_text=lowerCamelCase_ , p_features=lowerCamelCase_ , q_features=lowerCamelCase_ , p_tokens=lowerCamelCase_ , q_tokens=lowerCamelCase_ , num_buckets=lowerCamelCase_ , pca_max_data=lowerCamelCase_ , kmeans_explained_var=lowerCamelCase_ , kmeans_num_redo=lowerCamelCase_ , kmeans_max_iter=lowerCamelCase_ , featurize_model_name=lowerCamelCase_ , device_id=lowerCamelCase_ , max_text_length=lowerCamelCase_ , divergence_curve_discretization_size=lowerCamelCase_ , mauve_scaling_factor=lowerCamelCase_ , verbose=lowerCamelCase_ , seed=lowerCamelCase_ , )
return out
| 698 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return int((input_a, input_a).count(0 ) == 0 )
def lowercase ( ):
assert and_gate(0 ,0 ) == 0
assert and_gate(0 ,1 ) == 0
assert and_gate(1 ,0 ) == 0
assert and_gate(1 ,1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
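# For reference, the intended two-input behavior (as exercised by the tests above)
# can also be written as:
#   def and_gate(input_a: int, input_b: int) -> int:
#       return int(bool(input_a) and bool(input_b))
# i.e. the output is 1 iff neither input is 0.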
| 29 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : str = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Any = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase__ : Optional[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase__ : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase__ : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
lowerCamelCase__ : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ : int = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase )
class lowercase__:
'''simple docstring'''
def __call__( self :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Union[bool, str] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = None , **lowerCamelCase_ :Tuple , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : List[str] = titles if texts is None else texts
return super().__call__(
lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [titles]
SCREAMING_SNAKE_CASE : Dict = texts if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [texts]
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = questions if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) else [questions] * n_passages
assert len(lowerCamelCase_ ) == len(
lowerCamelCase_ ), f"There should be as many titles than texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts."
SCREAMING_SNAKE_CASE : Any = super().__call__(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : Dict = super().__call__(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase_ , lowerCamelCase_ )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : int = attention_mask
return self.pad(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :BatchEncoding , lowerCamelCase_ :DPRReaderOutput , lowerCamelCase_ :int = 16 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = reader_output[:3]
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(range(lowerCamelCase_ ) , reverse=lowerCamelCase_ , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase_ , top_spans=lowerCamelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase_ , start_index=lowerCamelCase_ , end_index=lowerCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCamelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :List[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for start_index, start_score in enumerate(lowerCamelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE : Dict = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] , reverse=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
SCREAMING_SNAKE_CASE : Optional[int] = end_index - start_index + 1
assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = DPRReaderTokenizer
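# Hedged worked example of the span search in `_get_best_spans` above (toy logits,
# max_answer_length=2, top_spans=1): with start_logits=[0.1, 2.0, 0.3] and
# end_logits=[0.2, 1.5, 0.9], every (start, end) pair whose length fits the budget
# is scored by start_logit + end_logit, so span (1, 1) wins with 3.5; any later
# span overlapping an already-chosen interval is skipped by the
# chosen_span_intervals check.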
| 698 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__a = logging.get_logger(__name__)
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
lowerCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
lowerCAmelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def a__ ( self ) -> int:
UpperCAmelCase_ : int = self.task_name.lower()
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''train'''
lowerCAmelCase = '''dev'''
lowerCAmelCase = '''test'''
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = Split.train ,_SCREAMING_SNAKE_CASE = None ,) -> str:
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ,_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[str] = args
UpperCAmelCase_ : Optional[Any] = glue_processors[args.task_name]()
UpperCAmelCase_ : int = glue_output_modes[args.task_name]
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
try:
UpperCAmelCase_ : Any = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
UpperCAmelCase_ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir ,f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' ,)
UpperCAmelCase_ : Any = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_, UpperCAmelCase_ : str = label_list[2], label_list[1]
UpperCAmelCase_ : str = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ : Dict = cached_features_file + '''.lock'''
with FileLock(_SCREAMING_SNAKE_CASE ):
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
UpperCAmelCase_ : List[Any] = time.time()
UpperCAmelCase_ : Dict = torch.load(_SCREAMING_SNAKE_CASE )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' ,time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
UpperCAmelCase_ : List[str] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
UpperCAmelCase_ : Tuple = self.processor.get_test_examples(args.data_dir )
else:
UpperCAmelCase_ : int = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
UpperCAmelCase_ : Optional[int] = examples[:limit_length]
UpperCAmelCase_ : int = glue_convert_examples_to_features(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,max_length=args.max_seq_length ,label_list=_SCREAMING_SNAKE_CASE ,output_mode=self.output_mode ,)
UpperCAmelCase_ : Union[str, Any] = time.time()
torch.save(self.features ,_SCREAMING_SNAKE_CASE )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self ,_SCREAMING_SNAKE_CASE ) -> InputFeatures:
return self.features[i]
def a__ ( self ) -> Any:
        return self.label_list
| 30 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """markuplm"""
def __init__( self :int , lowerCamelCase_ :List[str]=3_05_22 , lowerCamelCase_ :Union[str, Any]=7_68 , lowerCamelCase_ :str=12 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :str=30_72 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Any=1E-12 , lowerCamelCase_ :Dict=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :str=2_56 , lowerCamelCase_ :List[Any]=10_24 , lowerCamelCase_ :Union[str, Any]=2_16 , lowerCamelCase_ :Dict=10_01 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :str=50 , lowerCamelCase_ :List[str]="absolute" , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :int=None , **lowerCamelCase_ :Dict , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Tuple = use_cache
SCREAMING_SNAKE_CASE : str = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : Optional[Any] = max_depth
SCREAMING_SNAKE_CASE : Dict = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : Tuple = tag_pad_id
SCREAMING_SNAKE_CASE : str = subs_pad_id
SCREAMING_SNAKE_CASE : List[Any] = xpath_unit_hidden_size
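# Note (hedged): the additional properties above parameterize MarkupLM's XPath
# embeddings -- each node's xpath (e.g. "/html/body/div/li[1]") is encoded as up to
# `max_depth` (tag, subscript) pairs, padded with tag_pad_id/subs_pad_id and
# embedded through units sized by the max_xpath_*_unit_embeddings values before
# being combined with the token embeddings.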
| 698 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "roberta"
def __init__( self : Dict , _lowerCAmelCase : Union[str, Any]=50_265 , _lowerCAmelCase : List[Any]=768 , _lowerCAmelCase : List[str]=12 , _lowerCAmelCase : List[str]=12 , _lowerCAmelCase : str=3_072 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Dict=1E-12 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Union[str, Any]=0 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[Any]="absolute" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : Tuple , ):
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = classifier_dropout
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
            ] )
| 31 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """resnet"""
UpperCamelCase = ["""basic""", """bottleneck"""]
def __init__( self :Optional[int] , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :Tuple=64 , lowerCamelCase_ :Union[str, Any]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase_ :int=[3, 4, 6, 3] , lowerCamelCase_ :Any="bottleneck" , lowerCamelCase_ :Optional[int]="relu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Any=None , lowerCamelCase_ :Optional[int]=None , **lowerCamelCase_ :Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embedding_size
SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : List[Any] = layer_type
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = downsample_in_first_stage
SCREAMING_SNAKE_CASE : int = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :str ) -> float:
'''simple docstring'''
return 1E-3
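# Usage note (hedged; the class names above are obfuscated): the ONNX export config
# declares a single "pixel_values" input with dynamic batch/channel/height/width
# axes and a validation tolerance of 1e-3, i.e. exported outputs are compared to
# the PyTorch outputs with atol=1e-3.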
| 698 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
_UpperCAmelCase = OpenAIGPTModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , head_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
_UpperCAmelCase = OpenAIGPTLMHeadModel(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
        ) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : List[str] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__A : Any = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__A : str = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
_UpperCAmelCase = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCamelCase , )
_UpperCAmelCase = inputs_dict['''labels''']
_UpperCAmelCase = inputs_dict['''labels''']
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_UpperCamelCase , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids ) | 32 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class lowercase__( PretrainedConfig ):
    '''simple docstring'''
    model_type = """mra"""
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Dict:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
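# Illustrative usage (added; not part of the original file): with the
# constructor arguments now bound by name, the config above behaves like any
# other PretrainedConfig subclass.
if __name__ == "__main__":
    demo_config = lowercase__(vocab_size=5_02_65 , block_per_row=4 , approx_mode="full" )
    print(demo_config.model_type )  # mra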
| 698 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 2_24, 2_24] )
        image_dict = image_processor(images , return_tensors='''np''' )
        input_processor = processor(images=images , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_20_00] )
        images = np.ones([3, 2_24, 2_24] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowercase__( PretrainedConfig ):
    '''simple docstring'''
    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=12_81_12 , max_position_embeddings=10_24 , encoder_layers=12 , encoder_ffn_dim=40_96 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=40_96 , decoder_attention_heads=16 , encoder_layerdrop=0.0_5 , decoder_layerdrop=0.0_5 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=10_24 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=1_28 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.0_0_1 , router_aux_loss_coef=0.0_0_1 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ) -> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
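# Illustrative usage (added): the constructor above validates `router_dtype`,
# so any value outside float32/float16/bfloat16 raises the ValueError shown.
if __name__ == "__main__":
    cfg = lowercase__(num_experts=8 )
    print(cfg.router_dtype )  # float32
    try:
        lowercase__(router_dtype="int8" )
    except ValueError as err:
        print(err )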
| 698 | 0 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
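# Added illustration (not part of the original script): StratifiedKFold yields
# (train_idxs, valid_idxs) pairs whose label distribution mirrors the full
# dataset; these index arrays are exactly what `get_fold_dataloaders` below
# consumes. A minimal standalone sketch:
#
#     import numpy as np
#     from sklearn.model_selection import StratifiedKFold
#     labels = np.array([0, 1] * 8)
#     for train_idxs, valid_idxs in StratifiedKFold(n_splits=4).split(np.zeros(len(labels)), labels):
#         print(len(train_idxs), len(valid_idxs))  # 12 4, each fold label-balanced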
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders( accelerator ,dataset ,train_idxs ,valid_idxs ,batch_size = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = DatasetDict(
        {
            '''train''': dataset['''train'''].select(train_idxs ),
            '''validation''': dataset['''train'''].select(valid_idxs ),
            '''test''': dataset['''validation'''],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=True ,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function ,batched=True ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' ,'''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples ,padding='''longest''' ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,return_tensors='''pt''' ,)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets['''test'''] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config ,args ):
    """simple docstring"""
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('''glue''' ,'''mrpc''' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' ,'''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['''train'''].num_rows ) ,datasets['''train''']['''label'''] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator ,datasets ,train_idxs ,valid_idxs ,)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() ,lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=100 ,num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps ,)
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions ,references=references ,)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:' ,eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions ,dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references ,dim=0 )
    preds = torch.stack(test_predictions ,dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds ,references=test_references )
    accelerator.print('''Average test metrics from all folds:''' ,test_metric )
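# Added numeric sketch (illustration only) of the averaging step above:
# summing the stacked per-fold logits and dividing by the fold count before
# the argmax is an average-logit ensemble, e.g.
#     torch.stack([t1, t2, t3], dim=0).sum(dim=0).div(3).argmax(dim=-1)
# picks, per example, the class whose mean logit across folds is highest.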
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' ,type=str ,default=None ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' ,)
    parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
    # New Code #
    parser.add_argument('''--num_folds''' ,type=int ,default=3 ,help='''The number of splits to perform across the dataset''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config ,args )
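# Typical launches (added for illustration; flags match the parser above; the
# script file name is an assumption based on the accelerate examples layout):
#   python cross_validation.py --num_folds 3
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16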
if __name__ == "__main__":
main() | 34 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class lowercase__( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , requires_safety_checker: bool = True , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
    def components( self ) -> Dict[str, Any]:
        '''simple docstring'''
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ) -> None:
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> None:
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def text2img_sd1_1( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_2( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_3( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ) -> List[str]:
        '''simple docstring'''
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_4( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
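# Hedged usage sketch (added; not part of the original file): one plausible way
# to drive the comparison pipeline. Treat the `custom_pipeline` identifier as
# an assumption about how this community pipeline is published.
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#     images = output.images  # one image per checkpoint v1.1-v1.4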
| 698 | 0 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
a_ :Any = TypeVar('T')
class lowercase ( Generic[T] ):
    def __init__( self , directed : bool = True ):
        self.adj_list : dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def lowercase__ ( self , source_vertex : T , destination_vertex : T ):
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self : Tuple ):
return pformat(self.adj_list )
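# Minimal usage sketch (added for illustration): `lowercase__` is this class's
# obfuscated add-edge method; it returns `self`, so calls can be chained.
if __name__ == "__main__":
    demo_graph = lowercase(directed=False )
    demo_graph.lowercase__(1 , 2 ).lowercase__(2 , 3 )
    print(demo_graph )  # {1: [2], 2: [1, 3], 3: [2]}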
| 35 |
"""simple docstring"""
def bubble_sort( list_data : list , length : int = 0 ) -> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
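    # Added demo (illustration only): each recursive pass bubbles the largest
    # remaining element into place; a presorted list returns after one pass.
    print(bubble_sort([5, 1, 4, 2] ))  # [1, 2, 4, 5]
    print(bubble_sort([1, 2, 3] ))  # already sorted: no recursion needed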
| 698 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/accelerate""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 36 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet: UNetaDModel , scheduler: ScoreSdeVeScheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 20_00 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
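# Hedged usage sketch (added): wiring the pipeline with a small, randomly
# initialized UNet purely to show the call signature; real use would load
# pretrained weights instead of these assumed toy settings.
if __name__ == "__main__":
    demo_unet = UNetaDModel(sample_size=32 , in_channels=3 , out_channels=3 )
    demo_scheduler = ScoreSdeVeScheduler()
    demo_pipe = lowercase__(unet=demo_unet , scheduler=demo_scheduler )
    demo_images = demo_pipe(batch_size=1 , num_inference_steps=4 ).images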
| 698 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A__ ( TestCasePlus ):
"""simple docstring"""
    def _create_dummy_data( self , data_dir ):
        os.makedirs(data_dir , exist_ok=True )
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f'''{split}.{field}''' ) , "w" ) as f:
                    f.write(content )
    def _run_finetune( self , gpus , distributed_retriever = "pytorch" ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , "output" )
        data_dir = os.path.join(tmp_dir , "data" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , "metrics.json" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
@require_torch_gpu
def _UpperCamelCase( self : List[str] ):
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def _UpperCamelCase( self : Optional[Any] ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def _UpperCamelCase( self : List[str] ):
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _UpperCamelCase( self : List[Any] ):
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 37 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits : int , classical_bits : int ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
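    # Expected histogram (added note): the two X gates put both qubits in |1>,
    # so all 1_000 shots should report the state '11', i.e. {'11': 1000}.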
| 698 | 0 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
    dummy_file_name = '''dummy_data'''
    datasets_scripts_dir = '''datasets'''
    is_local = False
    def __init__( self , dataset_name , config , version , cache_dir = None , use_local_dummy_data = False , load_existing_dummy_data = True , download_callbacks = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file( self ):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder( self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("""dummy""" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("""dummy""" , self.version_name )
    @property
    def dummy_zip_file( self ):
        return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
    def download_dummy_data( self ):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
    @property
    def local_path_to_dummy_data( self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def github_path_to_dummy_data( self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
        return self._bucket_url
    @property
    def manual_dir( self ):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
    def download_and_extract( self , data_url , *args ):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download( self , data_url , *args ):
        return self.download_and_extract(data_url )
    def download_custom( self , data_url , custom_download ):
        return self.download_and_extract(data_url )
    def extract( self , path , *args , **kwargs ):
        return path
    def get_recorded_sizes_checksums( self ):
        return {}
    def create_dummy_data_dict( self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single( self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files( self ):
        pass
    def manage_extracted_files( self ):
        pass
    def iter_archive( self , path ):
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob("""*""" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
                yield file_path.relative_to(path ).as_posix(), file_path.open("""rb""" )
    def iter_files( self , paths ):
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith((""".""", """__""") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith((""".""", """__""") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith((""".""", """__""") ):
                            continue
                        yield os.path.join(dirpath , filename )
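# Hedged usage sketch (added): exercising the pure path logic above; the
# dataset name and version are placeholders and no network access happens.
if __name__ == "__main__":
    mock = __snake_case("""squad""" , None , """1.0.0""" )
    print(mock.dummy_zip_file )  # dummy/1.0.0/dummy_data.zip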
| 38 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure( config ):
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption( parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus ):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker( OutputChecker ):
    '''simple docstring'''
    def check_output( self , want , got , optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
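# Added illustration: with the flag registered above, a doctest can opt out of
# output comparison entirely, e.g.
#     >>> print(datetime.now())  # doctest: +IGNORE_RESULT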
| 698 | 0 |
from __future__ import annotations
from collections import deque
class snake_case_ :
'''simple docstring'''
    def __init__( self , keywords : list[str] ) -> None:
        self.adlist : list[dict] = []
        self.adlist.append(
            {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state : int , char : str ) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self , keyword : str ) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        '''value''': character,
                        '''next_states''': [],
                        '''fail_state''': 0,
                        '''output''': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ) -> None:
        q : deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]['''fail_state''']
                while (
                    self.find_next_state(state , self.adlist[child]['''value'''] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]['''fail_state''']
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]['''value'''] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]['''output''']
                    + self.adlist[self.adlist[child]['''fail_state''']]['''output''']
                )
    def search_in( self , string : str ) -> dict[str, list[int]]:
        result : dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['''fail_state''']
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
    doctest.testmod()
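# Quick usage check for the automaton above (output verified by hand against the
# restored implementation):
#
#     automaton = Automaton(["what", "hat", "ver", "er"])
#     automaton.search_in("whatever, err ... , wherever")
#     # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}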
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        # Note: this tester intentionally hardcodes its sizes instead of using the arguments above.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
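# Note (an addition, not from the original file): the @slow tests above are skipped
# by default. In transformers they only run when the RUN_SLOW environment variable
# is set, e.g.:
#
#     RUN_SLOW=1 python -m pytest tests/models/convbert/test_modeling_tf_convbert.py
#
# The test file path shown is an assumption based on the repository layout.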
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True,
        use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1,
        max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
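# Illustrative standalone use of the tester above (an addition, not part of the
# original test file): the tester can build a tiny config plus random inputs, so a
# model forward pass can be exercised outside of unittest. `parent=None` is an
# assumption that holds as long as no assertions on `self.parent` are triggered.
#
#     tester = BlipTextModelTester(parent=None)
#     config, input_ids, input_mask = tester.prepare_config_and_inputs()
#     model = TFBlipTextModel(config=config)
#     outputs = model(input_ids, attention_mask=input_mask, training=False)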
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
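# Illustrative usage (an addition, not part of the original module): build a reduced
# config and inspect the dynamic ONNX input axes. `OnnxConfig` subclasses take the
# model config plus an optional task name.
#
#     config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#     onnx_config = BertOnnxConfig(config, task="multiple-choice")
#     dict(onnx_config.inputs)
#     # -> {'input_ids': {0: 'batch', 1: 'choice', 2: 'sequence'}, ...}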
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32,
            in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37,
            layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
            pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
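# Saving generated frames to disk (an addition, not from the original test): the
# frames returned by the pipeline can be written out with diffusers' video helper.
#
#     from diffusers.utils import export_to_video
#     frames = pipe(prompt, num_inference_steps=25).frames
#     video_path = export_to_video(frames)  # returns the path to an .mp4 file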
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
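# Illustrative usage (an addition): LUKE keeps a separate vocabulary and embedding
# size for entities, so both can be shrunk independently of the word vocabulary for
# quick experiments.
#
#     config = LukeConfig(entity_vocab_size=10_000, entity_emb_size=128, num_hidden_layers=6)
#     config.use_entity_aware_attention
#     # -> True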
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector for the given sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move every component of the winning weight vector toward the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # Weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # Training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
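# Worked single-step example (an addition): with alpha = 0.5, each weight component
# moves halfway toward the sample component, since w += alpha * (x - w).
#
#     som = SelfOrganizingMap()
#     som.update([[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]], [1, 1, 0, 0], 0, 0.5)
#     # the first vector becomes [0.6, 0.8, 0.25, 0.45]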
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
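# Expected classification for the demo graphs above (an addition): g1 has exactly two
# odd-degree vertices (1 and 5), so it has an Euler path; g4 is a triangle with all
# even degrees, so it has an Euler cycle; g5 has no edges at all.
#
#     check_euler({1: [2, 3], 2: [1, 3], 3: [1, 2]}, 10)
#     # graph has a Euler cycle
#     # [1, 2, 3, 1]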
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in the unit square."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of f over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
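# Example session (an addition; results are stochastic, values shown are typical):
#
#     pi_estimator(100_000)                        # prints an estimate near 3.14
#     area_under_line_estimator_check(10_000)      # exact answer is 0.5
#     pi_estimator_using_area_under_curve(10_000)  # integrates sqrt(4 - x^2) on [0, 2]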
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : int = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
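        # i.e. `shift_tokens_right` rotates the trailing EOS to position 0, so the
        # decoder input starts [eos, ro_RO, ...] while the labels start [ro_RO, ...].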
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
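# Note on the expected output above (illustrative): the source language code
# (en_XX = 250004) is prepended to `input_ids`, while the target language is
# enforced by passing its code (ar_AR = 250001) as `forced_bos_token_id`, so the
# decoder starts every hypothesis with it.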
| 698 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase__( TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._create_example_records()
SCREAMING_SNAKE_CASE : List[Any] = Dataset.from_list(lowerCamelCase_ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(lowerCamelCase_ ):
self.assertDictEqual(lowerCamelCase_ , example_records[i] )
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._create_example_records()
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def __lowerCAmelCase ( self :List[str] ) -> Dict: # checks what happens with missing columns
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_list(lowerCamelCase_ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE : List[str] = Dataset.from_list(lowerCamelCase_ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def __lowerCAmelCase ( self :Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
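# Minimal usage sketch (illustrative): `Dataset.from_list` infers a column's feature
# type from the first non-null value, so an empty list in the first record does not
# pin the column to a null type - the case the type-inference test above exercises.
def _from_list_inference_demo() -> None:
    demo = Dataset.from_list([{"col_1": []}, {"col_1": [1, 2]}])
    assert demo.features["col_1"] == Sequence(Value("int64"))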
| 698 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( PreTrainedModel ):
"""simple docstring"""
_snake_case : Any = CLIPConfig
_snake_case : List[Any] = ["""CLIPEncoderLayer"""]
def __init__( self :Union[str, Any] , lowerCamelCase__ :CLIPConfig ):
super().__init__(lowerCamelCase__ )
UpperCamelCase__ :List[str] = CLIPVisionModelWithProjection(config.vision_config )
UpperCamelCase__ :Optional[int] = nn.Linear(config.vision_config.projection_dim , 1 )
UpperCamelCase__ :Optional[Any] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def __a ( self :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[int]=0.5 , lowerCamelCase__ :Tuple=0.5 ):
UpperCamelCase__ :Optional[int] = self.vision_model(lowerCamelCase__ )[0]
UpperCamelCase__ :str = self.p_head(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = nsfw_detected.flatten()
UpperCamelCase__ :Optional[int] = nsfw_detected > p_threshold
UpperCamelCase__ :Dict = nsfw_detected.tolist()
if any(lowerCamelCase__ ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(lowerCamelCase__ ):
if nsfw_detected_:
UpperCamelCase__ :List[str] = np.zeros(images[idx].shape )
UpperCamelCase__ :Optional[Any] = self.w_head(lowerCamelCase__ )
UpperCamelCase__ :Dict = watermark_detected.flatten()
UpperCamelCase__ :List[str] = watermark_detected > w_threshold
UpperCamelCase__ :str = watermark_detected.tolist()
if any(lowerCamelCase__ ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(lowerCamelCase__ ):
if watermark_detected_:
UpperCamelCase__ :Any = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
| 45 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def __A ( a_ : Callable[[int | float], int | float] , a_ : int | float , a_ : int | float , a_ : int = 1_00 , )-> float:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = x_start
SCREAMING_SNAKE_CASE : Union[str, Any] = fnc(a_ )
SCREAMING_SNAKE_CASE : Optional[int] = 0.0
for _ in range(a_ ):
# Approximates curve as a sequence of linear lines and sums their length
SCREAMING_SNAKE_CASE : int = (x_end - x_start) / steps + xa
SCREAMING_SNAKE_CASE : Optional[int] = fnc(a_ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
SCREAMING_SNAKE_CASE : str = xa
SCREAMING_SNAKE_CASE : Any = fxa
return length
if __name__ == "__main__":
def __A ( a_ : Optional[Any] )-> List[Any]:
'''simple docstring'''
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
lowerCamelCase__ : str = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
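    # Sanity check (a minimal sketch): for f(x) = x the exact arc length on [0, 1]
    # is sqrt(2); summing math.hypot over equal linear segments - exactly the
    # approximation performed above - reproduces it.
    _xs = [j / 100 for j in range(101)]
    _approx = sum(math.hypot(_xs[j + 1] - _xs[j], _xs[j + 1] - _xs[j]) for j in range(100))
    assert abs(_approx - math.sqrt(2)) < 1e-9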
| 698 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Dict = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase : str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : str = val
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCamelCase : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_lowerCamelCase : str = value
else:
_lowerCamelCase : Any = value
return new_state_dict
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : str = ""
if is_panoptic:
_lowerCamelCase : Any = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCamelCase : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : int = in_proj_weight[:256, :]
_lowerCamelCase : Dict = in_proj_bias[:256]
_lowerCamelCase : int = in_proj_weight[256:512, :]
_lowerCamelCase : Tuple = in_proj_bias[256:512]
_lowerCamelCase : Tuple = in_proj_weight[-256:, :]
_lowerCamelCase : Optional[int] = in_proj_bias[-256:]
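# Illustration only (hypothetical helper, not part of the conversion): PyTorch's
# nn.MultiheadAttention stores q/k/v as a single fused (3*d, d) `in_proj_weight`;
# with d = 256 as in the encoder above, rows [0:256] are q, [256:512] k, [512:768] v.
def _check_qkv_split(fused, dim=256):
    q, k, v = fused[:dim, :], fused[dim : 2 * dim, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, fused.shape[1])
    return q, k, v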
def lowerCamelCase_( ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_lowerCamelCase : Optional[int] = "resnet101"
if "dc5" in model_name:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[int] = "panoptic" in model_name
if is_panoptic:
_lowerCamelCase : Optional[int] = 250
else:
_lowerCamelCase : Optional[Any] = 91
_lowerCamelCase : List[Any] = "huggingface/label-files"
_lowerCamelCase : List[Any] = "coco-detection-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Any = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
# load image processor
_lowerCamelCase : Any = "coco_panoptic" if is_panoptic else "coco_detection"
_lowerCamelCase : Tuple = ConditionalDetrImageProcessor(format=_lowerCamelCase )
# prepare image
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : str = image_processor(images=_lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = encoding["pixel_values"]
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
_lowerCamelCase : int = torch.hub.load("DeppMeng/ConditionalDETR" , _lowerCamelCase , pretrained=_lowerCamelCase ).eval()
_lowerCamelCase : Optional[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_lowerCamelCase : Tuple = "conditional_detr." + src
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : int = rename_backbone_keys(_lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCamelCase , is_panoptic=_lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : Tuple = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_lowerCamelCase : List[Any] = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase : List[str] = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Tuple = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_lowerCamelCase : int = state_dict.pop(_lowerCamelCase )
_lowerCamelCase : Dict = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : Any = ConditionalDetrForSegmentation(_lowerCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
model.push_to_hub(repo_id=_lowerCamelCase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_lowerCamelCase : List[Any] = conditional_detr(_lowerCamelCase )
_lowerCamelCase : Optional[Any] = model(_lowerCamelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : Tuple = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 46 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __A ( a_ : int=None )-> Tuple:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE : List[str] = subparsers.add_parser('''test''' )
else:
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=a_ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=a_ )
return parser
def __A ( a_ : Any )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
SCREAMING_SNAKE_CASE : Tuple = script_name
else:
SCREAMING_SNAKE_CASE : Optional[Any] = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE : str = ['''accelerate-launch'''] + test_args.split()
SCREAMING_SNAKE_CASE : List[str] = execute_subprocess_async(a_ , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
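# Shape of the command the helper above assembles (illustrative):
#   accelerate-launch [--config_file=<path>] <...>/test_utils/scripts/test_script.py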
def __A ( )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = test_command_parser()
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
test_command(a_ )
if __name__ == "__main__":
main()
| 698 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCamelCase_ : bool , lowerCamelCase_ : bool ):
def run_func(lowerCamelCase_ : str ):
@wraps(lowerCamelCase_ )
def run_in_eager_mode(*lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
return func(*lowerCamelCase_ , **lowerCamelCase_ )
@wraps(lowerCamelCase_ )
@tf.function(experimental_compile=lowerCamelCase_ )
def run_in_graph_mode(*lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[Any] ):
return func(*lowerCamelCase_ , **lowerCamelCase_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ):
__a : List[str] = random.Random()
__a : Optional[Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCamelCase_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
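# Usage sketch (illustrative; assumes TF is available and that the decorator factory
# above is exposed as `run_with_tf_optimizations`, the name the benchmark methods
# below use): graph mode with XLA wraps the callable in a compiled tf.function.
def _decorator_demo():
    @run_with_tf_optimizations(False, True)  # do_eager_mode=False, use_xla=True
    def _toy_matmul():
        x = tf.ones((2, 2))
        return tf.matmul(x, x)

    return _toy_matmul()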
class _UpperCamelCase( Benchmark ):
__SCREAMING_SNAKE_CASE : TensorFlowBenchmarkArguments
__SCREAMING_SNAKE_CASE : PretrainedConfig
__SCREAMING_SNAKE_CASE : str = "TensorFlow"
@property
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
return tf.__version__
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : int = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : Optional[int] = self._prepare_inference_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_speed(_inference )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Tuple = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : Optional[int] = self._prepare_train_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_speed(_train )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE__ )
__a : Any = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : Optional[Any] = self._prepare_inference_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_memory(_inference )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE__ )
__a : Tuple = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
__a : List[Any] = self._prepare_train_func(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return self._measure_memory(_train )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : str = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
__a : Any = (
hasattr(SCREAMING_SNAKE_CASE__ , 'architectures' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__a : Optional[Any] = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__a : List[Any] = __import__('transformers' , fromlist=[model_class] )
__a : Dict = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Any = model_cls(SCREAMING_SNAKE_CASE__ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
__a : Optional[int] = TF_MODEL_MAPPING[config.__class__](SCREAMING_SNAKE_CASE__ )
# encoder-decoder has vocab size saved differently
__a : List[str] = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE__ , 'vocab_size' ) else config.encoder.vocab_size
__a : Optional[Any] = random_input_ids(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
__a : Any = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Optional[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
__a : Optional[int] = (
hasattr(SCREAMING_SNAKE_CASE__ , 'architectures' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__a : List[str] = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__a : Dict = __import__('transformers' , fromlist=[model_class] )
__a : str = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Tuple = model_cls(SCREAMING_SNAKE_CASE__ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
__a : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](SCREAMING_SNAKE_CASE__ )
# encoder-decoder has vocab size saved differently
__a : int = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE__ , 'vocab_size' ) else config.encoder.vocab_size
__a : Tuple = random_input_ids(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__a : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )[0]
__a : List[str] = tf.gradients(SCREAMING_SNAKE_CASE__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__a : Tuple = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )[0]
__a : Tuple = tf.gradients(SCREAMING_SNAKE_CASE__ , model.trainable_variables )
return gradients
__a : Optional[int] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(SCREAMING_SNAKE_CASE__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__a : Optional[Any] = timeit.repeat(
SCREAMING_SNAKE_CASE__ , repeat=self.args.repeat , number=1_0 , )
return min(SCREAMING_SNAKE_CASE__ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Callable[[], None] ):
'''simple docstring'''
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
__a : Any = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
__a : Any = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
__a : Tuple = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__a : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = meminfo.used
__a : Dict = Memory(SCREAMING_SNAKE_CASE__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
__a : Any = None
else:
__a : Optional[Any] = measure_peak_memory_cpu(SCREAMING_SNAKE_CASE__ )
__a : Any = Memory(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
__a : Any = stop_memory_tracing(SCREAMING_SNAKE_CASE__ )
if memory is None:
__a : Any = summary.total
else:
__a : List[str] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 47 |
"""simple docstring"""
def __A ( a_ : int = 10 , a_ : int = 10_00 , a_ : bool = True )-> int:
'''simple docstring'''
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and isinstance(a_ , a_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def __A ( a_ : int , a_ : int )-> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def __A ( a_ : int , a_ : int , a_ : int )-> None:
'''simple docstring'''
assert (
isinstance(a_ , a_ ) and isinstance(a_ , a_ ) and isinstance(a_ , a_ )
), 'argument values must be type of "int"'
if lower > higher:
        raise ValueError('''argument values for lower and higher must satisfy lower < higher''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(a_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = lower
SCREAMING_SNAKE_CASE : int = higher
SCREAMING_SNAKE_CASE : List[str] = []
while True:
SCREAMING_SNAKE_CASE : Any = get_avg(a_ , a_ )
last_numbers.append(a_ )
if answer(a_ ) == "low":
SCREAMING_SNAKE_CASE : Dict = number
elif answer(a_ ) == "high":
SCREAMING_SNAKE_CASE : Tuple = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def __A ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = int(input('''Enter lower value : ''' ).strip() )
SCREAMING_SNAKE_CASE : Tuple = int(input('''Enter high value : ''' ).strip() )
SCREAMING_SNAKE_CASE : List[str] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(a_ , a_ , a_ )
if __name__ == "__main__":
main()
| 698 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ : Any = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCAmelCase__ : Optional[Any] = {"mobilebert-uncased": 5_12}
UpperCAmelCase__ : Dict = {}
class A ( PreTrainedTokenizerFast ):
snake_case__ :str = VOCAB_FILES_NAMES
snake_case__ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[int] = PRETRAINED_INIT_CONFIGURATION
snake_case__ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Union[str, Any] = MobileBertTokenizer
def __init__( self : List[Any] , __magic_name__ : str=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=True , __magic_name__ : Tuple="[UNK]" , __magic_name__ : int="[SEP]" , __magic_name__ : List[str]="[PAD]" , __magic_name__ : List[str]="[CLS]" , __magic_name__ : int="[MASK]" , __magic_name__ : Dict=True , __magic_name__ : Any=None , **__magic_name__ : List[str] , ):
"""simple docstring"""
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __magic_name__ ) != do_lower_case
or normalizer_state.get("strip_accents" , __magic_name__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __magic_name__ ) != tokenize_chinese_chars
):
lowerCAmelCase__ = getattr(__magic_name__ , normalizer_state.pop("type" ) )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = strip_accents
lowerCAmelCase__ = tokenize_chinese_chars
lowerCAmelCase__ = normalizer_class(**__magic_name__ )
lowerCAmelCase__ = do_lower_case
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any]=None ):
"""simple docstring"""
lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
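    # Layout sketch for a hypothetical sentence pair (A, B) under the two methods above:
    #   tokens:   [CLS] A1 A2 [SEP] B1 [SEP]
    #   type ids:   0   0  0    0   1    1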
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
lowerCAmelCase__ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
| 48 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowerCamelCase__ : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __A ( a_ : Optional[int] , a_ : str , a_ : str , a_ : str , a_ : List[str] )-> Tuple:
'''simple docstring'''
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE : Any = getattr(a_ , a_ )
if weight_type is not None:
SCREAMING_SNAKE_CASE : Optional[int] = getattr(a_ , a_ ).shape
else:
SCREAMING_SNAKE_CASE : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : Optional[int] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Any = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __A ( a_ : Optional[Any] , a_ : Dict )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Optional[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = hf_model.feature_extractor
SCREAMING_SNAKE_CASE : Tuple = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : int = False
if "conv_layers" in name:
load_conv_layer(
a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE : List[str] = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : Dict = name.split(a_ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE : Optional[int] = mapped_key.replace('''*''' , a_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE : List[str] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = '''bias'''
elif "weight" in name:
SCREAMING_SNAKE_CASE : Tuple = '''weight'''
else:
SCREAMING_SNAKE_CASE : str = None
set_recursively(a_ , a_ , a_ , a_ , a_ )
continue
if not is_used:
unused_weights.append(a_ )
logger.warning(F"Unused weights: {unused_weights}" )
def __A ( a_ : Dict , a_ : int , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE : List[str] = name.split('''.''' )
SCREAMING_SNAKE_CASE : Dict = int(items[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[int] , a_ : Optional[int] , a_ : Any , a_ : Any )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split('''adaptor.''' )[-1]
SCREAMING_SNAKE_CASE : List[Any] = name.split('''.''' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
else:
SCREAMING_SNAKE_CASE : str = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : int = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a_ , a_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
SCREAMING_SNAKE_CASE : str = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a_ )
def __A ( a_ : Optional[Any] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = emb.weight.shape
SCREAMING_SNAKE_CASE : Any = nn.Linear(a_ , a_ , bias=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = emb.weight.data
return lin_layer
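# Illustration only (hypothetical tensors, separate from the conversion): the helper
# above turns a (vocab, d) embedding into a bias-free linear layer that shares the
# embedding's weight data, i.e. a tied LM head.
def _tied_head_demo() -> None:
    emb = nn.Embedding(10, 4)
    head = nn.Linear(4, 10, bias=False)
    head.weight.data = emb.weight.data
    assert head.weight.data_ptr() == emb.weight.data_ptr()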
@torch.no_grad()
def __A ( a_ : Tuple , a_ : Optional[int] , a_ : List[Any] , a_ : Any , a_ : Tuple , a_ : int , a_ : Any , a_ : str , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] , )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = WavaVecaConfig.from_pretrained(
a_ , add_adapter=a_ , adapter_stride=a_ , adapter_kernel_size=a_ , use_auth_token=a_ , output_hidden_size=a_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = MBartConfig.from_pretrained(a_ )
# load model
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
SCREAMING_SNAKE_CASE : int = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(a_ , use_auth_token=a_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE : str = WavaVecaModel(a_ )
recursively_load_weights_wavaveca(model.encoder , a_ )
# load decoder weights
SCREAMING_SNAKE_CASE : Dict = MBartForCausalLM(a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a_ )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechEncoderDecoderModel(encoder=a_ , decoder=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer(a_ )
tokenizer.save_pretrained(a_ )
SCREAMING_SNAKE_CASE : Tuple = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE : Any = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : List[str] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = '''mbart50'''
SCREAMING_SNAKE_CASE : Optional[int] = '''wav2vec2'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : List[str] = 25_00_04
SCREAMING_SNAKE_CASE : Dict = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE : Any = SpeechEncoderDecoderConfig.from_dict(a_ )
hf_wavavec.save_pretrained(a_ )
feature_extractor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
lowerCamelCase__ : Dict = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 698 | 0 |
"""simple docstring"""
# English translation of the Korean strings below: "How to install Transformers";
# "To install from source instead of the latest release, comment out the command
# above and uncomment the command below."
_lowercase : int = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_lowercase : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_lowercase : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
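

# Sketch of what the lazy setup above buys (illustrative; assumes transformers
# and torch are installed): importing the package stays cheap, and the heavy
# modules listed in `_import_structure` are only materialized on first access.
def _example_lazy_access():
    from transformers.models.m2m_100 import M2M100Config  # resolved on demand

    return M2M100Config()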
| 698 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
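

# Standalone sketch of the classification path exercised by the tests above
# (the checkpoint name matches the tests; everything else is illustrative).
def _example_classify(image):
    import torch
    from transformers import MobileViTForImageClassification, MobileViTImageProcessor

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
    model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1).item()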
| 50 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
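

# Minimal illustrative call, mirroring the doctest in _KWARGS_DESCRIPTION above
# (downloading the featurization model and GPU availability are environment
# assumptions, so this is a sketch rather than a test):
def _example_mauve():
    import datasets

    metric = datasets.load_metric("mauve")
    return metric.compute(predictions=["hello there"], references=["hello there"])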
| 698 | 0 |
'''simple docstring'''
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the main domain of a URL, e.g. "github.com"."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the network location (sub-domain) of a URL, e.g. "docs.github.com"."""
    return parse.urlparse(url).netloc
def __snake_case ( SCREAMING_SNAKE_CASE_ : str = "https://github.com" ) -> list[str]:
"""simple docstring"""
UpperCAmelCase = get_domain_name(SCREAMING_SNAKE_CASE_ )
# Initialize the parser
UpperCAmelCase = Parser(SCREAMING_SNAKE_CASE_ )
try:
# Open URL
UpperCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
UpperCAmelCase = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
UpperCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ )
# Get the valid email.
UpperCAmelCase = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(SCREAMING_SNAKE_CASE_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(F"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
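

# Quick offline sanity check of the two URL helpers above (illustrative; pure
# string parsing, no network access required):
def _example_domain_helpers():
    assert get_sub_domain_name("https://docs.github.com/en") == "docs.github.com"
    assert get_domain_name("https://docs.github.com/en") == "github.com"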
| 51 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase__ : int = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase__ : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
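

# Illustrative call pattern for the reader tokenizer defined above (the
# checkpoint name is an assumption; any DPR reader checkpoint would do):
def _example_reader_encoding():
    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by Haddaway."],
        padding=True,
        return_tensors="pt",
    )
    return encoded["input_ids"].shape  # (n_passages, sequence_length)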
| 698 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
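

# The input-format note above says references must be transposed relative to
# sacrebleu's layout; this small illustrative helper shows that transposition
# on its own (per-prediction reference lists -> per-position streams):
def _example_transpose_references(references):
    # [["ref a1", "ref a2"], ["ref b1", "ref b2"]] ->
    # [["ref a1", "ref b1"], ["ref a2", "ref b2"]]
    references_per_prediction = len(references[0])
    return [[refs[i] for refs in references] for i in range(references_per_prediction)]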
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
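

# Illustrative construction of the config defined above, using its defaults
# (nothing here is fetched from the Hub; the xpath-related values are the
# MarkupLM-specific knobs):
def _example_markuplm_config():
    config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
    return config.max_xpath_tag_unit_embeddings  # 256 by default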
| 698 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Optional[Any]="train" ):
return calculate_hypothesis_value(lowerCAmelCase_, lowerCAmelCase_ ) - output(
lowerCAmelCase_, lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = 0
for i in range(len(lowerCAmelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : str ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : str ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Any=m ):
__lowerCAmelCase = 0
for i in range(lowerCAmelCase_ ):
if index == -1:
summation_value += _error(lowerCAmelCase_ )
else:
summation_value += _error(lowerCAmelCase_ ) * train_data[i][0][index]
return summation_value
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = summation_of_cost_derivative(lowerCAmelCase_, lowerCAmelCase_ ) / m
return cost_derivative_value
def a_ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowerCAmelCase = 0.00_0002
__lowerCAmelCase = 0
__lowerCAmelCase = 0
while True:
j += 1
__lowerCAmelCase = [0, 0, 0, 0]
for i in range(0, len(lowerCAmelCase_ ) ):
__lowerCAmelCase = get_cost_derivative(i - 1 )
__lowerCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowerCAmelCase_, lowerCAmelCase_, atol=lowerCAmelCase_, rtol=lowerCAmelCase_, ):
break
__lowerCAmelCase = temp_parameter_vector
print(('Number of iterations:', j) )
def a_ ( ):
for i in range(len(lowerCAmelCase_ ) ):
print(('Actual output value:', output(lowerCAmelCase_, 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(lowerCAmelCase_, 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
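
# Worked example of one hypothesis evaluation with the initial
# parameter_vector [2, 4, 1, 5] and the first training input (5, 2, 3):
#   the loop in _hypothesis_value uses the first len(data_input_tuple) - 1 = 2
#   features, so h(x) = 2 + 4*5 + 1*2 = 24, and the initial error on that
#   example is 24 - 15 = 9.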
| 53 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
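

# Illustrative check of the ONNX input spec above (no export is performed;
# instantiating the two config classes is enough to inspect the mapping):
def _example_resnet_onnx_inputs():
    onnx_config = ResNetOnnxConfig(ResNetConfig())
    return dict(onnx_config.inputs)  # {"pixel_values": {0: "batch", ...}}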
| 698 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
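

# Illustrative use of the processor above (the checkpoint name is an
# assumption; any Pix2Struct checkpoint exposing this processor would do):
def _example_pix2struct(image):
    from transformers import Pix2StructProcessor

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    inputs = processor(images=image, text="A picture of", return_tensors="pt")
    return sorted(inputs.keys())  # e.g. attention_mask, decoder_input_ids, flattened_patches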
| 54 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
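

# Illustrative construction of the config above with its defaults (no Hub
# access; block_per_row and approx_mode are the MRA-specific attention knobs):
def _example_mra_config():
    config = MraConfig(block_per_row=4, approx_mode="full")
    return config.num_hidden_layers  # 12 by default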
| 698 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "camembert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
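# --- added usage sketch ---
# Building the config and inspecting the ONNX input axes declared above:
#
# config = CamembertConfig()
# onnx_config = CamembertOnnxConfig(config)
# print(onnx_config.inputs)  # OrderedDict with dynamic axes for input_ids / attention_mask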
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """nllb-moe"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=128112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
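# --- added usage sketch ---
# Example instantiation of the MoE configuration above; the values shown are
# its own defaults:
#
# config = NllbMoeConfig(num_experts=128, expert_capacity=64, router_dtype="float32")
# print(config.d_model)  # 1024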
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
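# --- added migration sketch ---
# Per the deprecation message above, new code should import from the pipelines
# subpackage directly:
#
# from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput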
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    '''simple docstring'''
    def __init__( self , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , requires_safety_checker: bool = True , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def components( self ) -> Dict[str, Any]:
        '''simple docstring'''
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ) -> None:
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> None:
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def text2img_sd1_1( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_2( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_3( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_4( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        '''simple docstring'''
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ) -> StableDiffusionPipelineOutput:
        '''simple docstring'''
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
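# --- added usage sketch ---
# Loading this wrapper downloads all four v1.x checkpoints; the community-pipeline
# entry point name below is an assumption about the hosting repo:
#
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     pipe4_model_id, custom_pipeline="stable_diffusion_comparison"
# )
# output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)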
from __future__ import annotations
class IIRFilter:
    """simple docstring"""
    def __init__( self , order: int ) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs: list[float] , b_coeffs: list[float] ) -> None:
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs )}'''
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs )}'''
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample: float ) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
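if __name__ == "__main__":
    # Added example: with the default (pass-through) coefficients a = b = [1, 0, ...],
    # the filter returns each input sample unchanged.
    iir_filter = IIRFilter(2)
    print(iir_filter.process(1.0))  # 1.0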
"""simple docstring"""
def bubble_sort(list_data : list , length : int = 0 )-> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
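    # added sanity check
    print(bubble_sort([5, 2, 9, 1]))  # [1, 2, 5, 9]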
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''bridgetower_vision_model'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''bridgetower_text_model'''
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''bridgetower'''
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1e-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ) -> None:
        '''simple docstring'''
        _ = kwargs.pop("""text_config_dict""" , None )
        _ = kwargs.pop("""vision_config_dict""" , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs( cls , text_config: BridgeTowerTextConfig , vision_config: BridgeTowerVisionConfig , **kwargs ) -> "BridgeTowerConfig":
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
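# --- added usage sketch ---
# Composing the two sub-configs into the full model config defined above:
#
# text_config = BridgeTowerTextConfig()
# vision_config = BridgeTowerVisionConfig()
# config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
# print(config.to_dict()["model_type"])  # "bridgetower"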
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet: UNet2DModel , scheduler: ScoreSdeVeScheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2000 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
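# --- added usage sketch ---
# Unconditional generation with this pipeline; the checkpoint id is an assumed example:
#
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = pipe(num_inference_steps=2000).images[0]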
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) ->None:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
@property
    def mask_token(self) ->str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
    def mask_token(self , value) ->None:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def _batch_encode_plus(self , *args , **kwargs) ->BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus(self , *args , **kwargs) ->BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) ->Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None) ->List[int]:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) ->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
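# --- added usage sketch ---
# Typical round trip with the fast tokenizer defined above, assuming the
# "facebook/bart-base" checkpoint listed in the vocab maps:
#
# tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
# enc = tok("Hello world", return_tensors="pt")
# print(tok.decode(enc["input_ids"][0]))  # '<s>Hello world</s>'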
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits : int , classical_bits : int )-> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')