| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of exactly two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
__magic_name__ :int = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def A ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__magic_name__ :Union[str, Any] = {}
for i, token in enumerate(__lowerCAmelCase ):
__magic_name__ :Tuple = i
__magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def A ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
__magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
__magic_name__ :Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = ['''的''', '''人''', '''有''']
__magic_name__ :Any = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = True
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = False
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__magic_name__ :Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 0 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = params
__magic_name__ :Any = np.array(__lowerCAmelCase )
__magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
"""simple docstring"""
return len(self.lengths )
def A ( self ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.params.max_model_input_size
__magic_name__ :int = self.lengths > max_len
logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' )
def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ):
return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
__magic_name__ :Optional[int] = []
__magic_name__ :List[Any] = []
if self.params.mlm:
__magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
__magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__magic_name__ :int = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase )
if sub_s[-1] != sep_id:
__magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
assert len(__lowerCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__lowerCAmelCase )
new_tok_ids.extend(__lowerCAmelCase )
new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] )
__magic_name__ :Tuple = np.array(__lowerCAmelCase )
__magic_name__ :Optional[int] = np.array(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = len(self )
__magic_name__ :int = self.lengths > 1_1
__magic_name__ :List[str] = self.token_ids[indices]
__magic_name__ :Union[str, Any] = self.lengths[indices]
__magic_name__ :List[str] = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def A ( self ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
__magic_name__ :Tuple = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Dict = len(self )
__magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__magic_name__ :int = (unk_occs / self.lengths) < 0.5
__magic_name__ :str = self.token_ids[indices]
__magic_name__ :str = self.lengths[indices]
__magic_name__ :Any = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def A ( self ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [t[0] for t in batch]
__magic_name__ :List[Any] = [t[1] for t in batch]
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
# Max for paddings
__magic_name__ :Tuple = max(__lowerCAmelCase )
# Pad token ids
if self.params.mlm:
__magic_name__ :Any = self.params.special_tok_ids['''pad_token''']
else:
__magic_name__ :str = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(__lowerCAmelCase )
assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ )
__magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
__magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs)
return tk_t, lg_t
| 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = eval_examples
__magic_name__ :str = post_process_function
__magic_name__ :int = quant_trainer_args
__magic_name__ :List[str] = 1_2_8 # default number of calibration samples
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
__magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
return DataLoader(
__lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
__magic_name__ :List[str] = self.model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
model.eval()
quant_trainer.enable_calibration(__lowerCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__lowerCAmelCase ):
# Prediction step
__magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :Any = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    # Turn each byte into its two-digit uppercase hexadecimal representation
    # and join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: "
            "Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC 3548 section 6.
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: "
            "Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
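    # A short round-trip check for the two helpers above: encoding then decoding
    # returns the original bytes, and lowercase input is rejected by the
    # character-set check, as RFC 3548 section 6 requires for base16.
    payload = b"Hello World!"
    encoded = base16_encode(payload)  # "48656C6C6F20576F726C6421"
    assert base16_decode(encoded) == payload
    try:
        base16_decode(encoded.lower())
    except ValueError:
        pass  # lowercase hex digits are not in the accepted alphabet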
| 0 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_ :
a__ = 42
a__ = 42
a__ = 42
@dataclass
class lowerCamelCase_ :
a__ = 42
a__ = 42
a__ = None
a__ = None
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''train'''
a__ = '''dev'''
a__ = '''test'''
class lowerCamelCase_ :
@staticmethod
def A ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def A ( __lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase="[CLS]" , __lowerCAmelCase=1 , __lowerCAmelCase="[SEP]" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=-1_0_0 , __lowerCAmelCase=0 , __lowerCAmelCase=True , ):
"""simple docstring"""
__magic_name__ :str = {label: i for i, label in enumerate(__lowerCAmelCase )}
__magic_name__ :int = []
for ex_index, example in enumerate(__lowerCAmelCase ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d of %d''' , __lowerCAmelCase , len(__lowerCAmelCase ) )
__magic_name__ :Tuple = []
__magic_name__ :str = []
for word, label in zip(example.words , example.labels ):
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(__lowerCAmelCase ) > 0:
tokens.extend(__lowerCAmelCase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__lowerCAmelCase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__magic_name__ :List[Any] = tokenizer.num_special_tokens_to_add()
if len(__lowerCAmelCase ) > max_seq_length - special_tokens_count:
__magic_name__ :Optional[int] = tokens[: (max_seq_length - special_tokens_count)]
__magic_name__ :Tuple = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__magic_name__ :Dict = [sequence_a_segment_id] * len(__lowerCAmelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__magic_name__ :Tuple = [cls_token] + tokens
__magic_name__ :Any = [pad_token_label_id] + label_ids
__magic_name__ :Optional[int] = [cls_token_segment_id] + segment_ids
__magic_name__ :int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__magic_name__ :Dict = [1 if mask_padding_with_zero else 0] * len(__lowerCAmelCase )
# Zero-pad up to the sequence length.
__magic_name__ :Any = max_seq_length - len(__lowerCAmelCase )
if pad_on_left:
__magic_name__ :Optional[Any] = ([pad_token] * padding_length) + input_ids
__magic_name__ :Optional[Any] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__magic_name__ :Optional[int] = ([pad_token_segment_id] * padding_length) + segment_ids
__magic_name__ :Union[str, Any] = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__magic_name__ :Tuple = None
features.append(
InputFeatures(
input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , label_ids=__lowerCAmelCase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCamelCase_ ( lowerCamelCase ):
a__ = 42
a__ = nn.CrossEntropyLoss().ignore_index
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase = Split.train , ):
"""simple docstring"""
# Load data features from cache or dataset file
__magic_name__ :Any = os.path.join(
__lowerCAmelCase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__lowerCAmelCase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__magic_name__ :Optional[int] = cached_features_file + '''.lock'''
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
__magic_name__ :Optional[int] = torch.load(__lowerCAmelCase )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
__magic_name__ :int = token_classification_task.read_examples_from_file(__lowerCAmelCase , __lowerCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
__magic_name__ :str = token_classification_task.convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , __lowerCAmelCase )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCamelCase_ :
a__ = 42
a__ = -1_00
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase = Split.train , ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = token_classification_task.read_examples_from_file(__lowerCAmelCase , __lowerCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
__magic_name__ :Tuple = token_classification_task.convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__magic_name__ :Any = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__magic_name__ :Optional[int] = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return self.features[i]
| 0 |
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    """Simulated timeouts either hang (RequestWouldHangIndefinitelyError) or time out."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    """Simulated connection failures raise a ConnectionError immediately."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    """With HF_DATASETS_OFFLINE=1, http_head raises a ConnectionError subclass."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 0 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
SCREAMING_SNAKE_CASE__ : Tuple = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 6_55_36,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 6_55_36,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 13_10_72,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Return a timestep given the scaling factors for the clean signal and the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Map a linear schedule t in [0, 1] onto the 'crash' noise schedule."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
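# Small numeric check for the schedule helpers above: at t = 0.5 the crash
# schedule gives sigma = sin(pi/4)**2 = 0.5 and alpha = sqrt(0.75), so the
# returned timestep is atan2(0.5, sqrt(0.75)) / pi * 2 = 1/3.
assert torch.isclose(get_crash_schedule(torch.tensor(0.5)), torch.tensor(1.0 / 3.0))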
class lowerCamelCase_ ( lowerCamelCase ):
pass
class lowerCamelCase_ ( nn.Module ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__()
__magic_name__ :List[str] = DiffusionAttnUnetaD(__lowerCAmelCase , n_attn_layers=4 )
__magic_name__ :Any = deepcopy(self.diffusion )
__magic_name__ :Tuple = torch.quasirandom.SobolEngine(1 , scramble=__lowerCAmelCase )
def download(model_name):
    """Download the checkpoint for the given model name and return its local path."""
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
SCREAMING_SNAKE_CASE__ : List[str] = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
SCREAMING_SNAKE_CASE__ : int = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
SCREAMING_SNAKE_CASE__ : List[str] = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
SCREAMING_SNAKE_CASE__ : str = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def __lowercase ( snake_case ):
"""simple docstring"""
if name.startswith('''skip''' ):
return name.replace('''skip''', RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(f'''ResConvBlock error with {name}''' )
return name.replace(name[:6], RES_CONV_MAP[name[:6]] )
def __lowercase ( snake_case ):
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(snake_case ) and not isinstance(snake_case, snake_case ):
return name.replace(snake_case, snake_case )
elif name.startswith(snake_case ):
return [name.replace(snake_case, snake_case ) for v in value]
raise ValueError(f'''Attn error with {name}''' )
def __lowercase ( snake_case, snake_case=1_3 ):
"""simple docstring"""
__magic_name__ :List[Any] = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''', '''time_proj''' )
__magic_name__ :Dict = 0
if string.startswith('''net.3.''' ):
depth += 1
__magic_name__ :int = string[6:]
elif string.startswith('''net.''' ):
__magic_name__ :str = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
__magic_name__ :List[Any] = string[7:]
if string.startswith('''main.''' ):
__magic_name__ :Optional[int] = string[5:]
# mid block
if string[:2].isdigit():
__magic_name__ :Optional[Any] = string[:2]
__magic_name__ :int = string[2:]
else:
__magic_name__ :Dict = string[0]
__magic_name__ :Dict = string[1:]
if depth == max_depth:
__magic_name__ :Optional[int] = MID_NUM_TO_LAYER[layer_num]
__magic_name__ :Tuple = '''mid_block'''
elif depth > 0 and int(snake_case ) < 7:
__magic_name__ :List[Any] = DOWN_NUM_TO_LAYER[layer_num]
__magic_name__ :Optional[Any] = f'''down_blocks.{depth}'''
elif depth > 0 and int(snake_case ) > 7:
__magic_name__ :int = UP_NUM_TO_LAYER[layer_num]
__magic_name__ :Union[str, Any] = f'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
__magic_name__ :Dict = DEPTH_0_TO_LAYER[layer_num]
__magic_name__ :Any = f'''up_blocks.{max_depth - 1}''' if int(snake_case ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' )
__magic_name__ :str = string_left[1:]
if "resnets" in new_layer:
__magic_name__ :Optional[Any] = convert_resconv_naming(snake_case )
elif "attentions" in new_layer:
__magic_name__ :List[Any] = convert_attn_naming(snake_case )
__magic_name__ :Optional[Any] = new_string_left
if not isinstance(snake_case, snake_case ):
__magic_name__ :List[Any] = prefix + '''.''' + new_layer + '''.''' + string_left
else:
__magic_name__ :str = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers, don't have trainable weights
continue
__magic_name__ :Tuple = rename(snake_case )
# check if we need to transform from Conv => Linear for attention
if isinstance(snake_case, snake_case ):
__magic_name__ :Any = transform_conv_attns(snake_case, snake_case, snake_case )
else:
__magic_name__ :List[Any] = v
return new_state_dict
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if len(snake_case ) == 1:
if len(v.shape ) == 3:
# weight
__magic_name__ :int = v[:, :, 0]
else:
# bias
__magic_name__ :Union[str, Any] = v
else:
# qkv matrices
__magic_name__ :Tuple = v.shape[0]
__magic_name__ :Optional[Any] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__magic_name__ :str = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__magic_name__ :Any = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__magic_name__ :Optional[int] = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
__magic_name__ :Optional[Any] = download(snake_case )
__magic_name__ :int = MODELS_MAP[model_name]['''sample_rate''']
__magic_name__ :Optional[int] = MODELS_MAP[model_name]['''sample_size''']
__magic_name__ :List[Any] = Object()
__magic_name__ :Any = sample_size
__magic_name__ :str = sample_rate
__magic_name__ :str = 0
__magic_name__ :Tuple = UNetaDModel(sample_size=snake_case, sample_rate=snake_case )
__magic_name__ :Any = diffusers_model.state_dict()
__magic_name__ :Dict = DiffusionUncond(snake_case )
orig_model.load_state_dict(torch.load(args.model_path, map_location=snake_case )['''state_dict'''] )
__magic_name__ :List[Any] = orig_model.diffusion_ema.eval()
__magic_name__ :Union[str, Any] = orig_model.state_dict()
__magic_name__ :Tuple = rename_orig_weights(snake_case )
__magic_name__ :int = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__magic_name__ :List[str] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(snake_case ) == 0, f'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(snake_case ) ), f'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
__magic_name__ :Dict = value.squeeze()
__magic_name__ :Optional[int] = value
diffusers_model.load_state_dict(snake_case )
__magic_name__ :Tuple = 1_0_0
__magic_name__ :List[Any] = 3_3
__magic_name__ :Union[str, Any] = IPNDMScheduler(num_train_timesteps=snake_case )
__magic_name__ :Any = torch.manual_seed(snake_case )
__magic_name__ :List[str] = torch.randn([1, 2, config.sample_size], generator=snake_case ).to(snake_case )
__magic_name__ :Any = torch.linspace(1, 0, steps + 1, device=snake_case )[:-1]
__magic_name__ :Optional[Any] = get_crash_schedule(snake_case )
__magic_name__ :int = DanceDiffusionPipeline(unet=snake_case, scheduler=snake_case )
__magic_name__ :Any = torch.manual_seed(3_3 )
__magic_name__ :int = pipe(num_inference_steps=snake_case, generator=snake_case ).audios
__magic_name__ :Optional[Any] = sampling.iplms_sample(snake_case, snake_case, snake_case, {} )
__magic_name__ :Tuple = generated.clamp(-1, 1 )
__magic_name__ :Tuple = (generated - audio).abs().sum()
__magic_name__ :Optional[Any] = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''', snake_case )
print('''Diff max''', snake_case )
assert diff_max < 1E-3, f'''Diff max: {diff_max} is too much :-/'''
print(f'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
main(args)
| 0 |
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield prime numbers indefinitely, in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 0 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : int = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""allegro/herbert-base-cased""": 5_14}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = HerbertTokenizer
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase="</s>" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , **__lowerCAmelCase , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :str = [self.cls_token_id]
__magic_name__ :Tuple = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Dict = [self.sep_token_id]
__magic_name__ :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Optional[int] = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
| 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
| 0 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
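        # With simulated connection timeouts, a request with no explicit timeout should raise, while timeout=1.0 should surface ConnectTimeout.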
with pytest.raises(snake_case ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(snake_case ):
http_head('''https://huggingface.co''' )
| 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0 | 1 |
from __future__ import annotations
from math import pi
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
| 0 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :str = current_set.copy()
for row_index, row in enumerate(snake_case ):
__magic_name__ :Tuple = row[0]
for column_index, column in enumerate(snake_case ):
if magnitude == 0:
__magic_name__ :Optional[Any] = column
continue
__magic_name__ :Union[str, Any] = column / magnitude
# Subtract to cancel term
__magic_name__ :List[str] = current_set[0]
__magic_name__ :List[Any] = [first_row]
__magic_name__ :Any = current_set[1::]
for row in current_set:
__magic_name__ :str = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(snake_case )
continue
for column_index in range(len(snake_case ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(snake_case )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
__magic_name__ :Tuple = final_set[0]
__magic_name__ :List[Any] = []
__magic_name__ :int = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
__magic_name__ :int = simplify(snake_case )
for i in range(len(snake_case ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, snake_case )
__magic_name__ :Any = resultant
return final_set
def __lowercase ( snake_case ):
"""simple docstring"""
if len(snake_case ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
__magic_name__ :int = len(snake_case ) + 1
if any(len(snake_case ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(snake_case, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(snake_case ) == 1:
return [equations[0][-1] / equations[0][0]]
__magic_name__ :List[Any] = equations.copy()
if any(0 in row for row in data_set ):
__magic_name__ :Any = data_set.copy()
__magic_name__ :List[str] = []
for row_index, row in enumerate(snake_case ):
if 0 not in row:
__magic_name__ :Any = data_set.pop(snake_case )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, snake_case )
__magic_name__ :Tuple = data_set.copy()
__magic_name__ :Optional[Any] = simplify(snake_case )
__magic_name__ :Optional[Any] = simplified[::-1]
__magic_name__ :list = []
for row in simplified:
__magic_name__ :int = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
__magic_name__ :Union[str, Any] = row.copy()[: len(snake_case ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(snake_case ) == 0:
solutions.append(0 )
continue
__magic_name__ :Dict = temp_row[1::]
__magic_name__ :Any = temp_row[::-1]
for column_index, column in enumerate(snake_case ):
current_solution -= column * solutions[column_index]
solutions.append(snake_case )
__magic_name__ :List[Any] = []
for item in solutions:
final.append(float(round(snake_case, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : List[str] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
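    # Repeatedly replace the number with the product of its digits, counting the steps until a single digit remains.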
__magic_name__ :str = 0
__magic_name__ :Dict = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :Optional[Any] = [int(snake_case ) for i in num_string]
__magic_name__ :Dict = 1
for i in range(0, len(snake_case ) ):
total *= numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Union[str, Any] = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :str = [int(snake_case ) for i in num_string]
__magic_name__ :Optional[int] = 0
for i in range(0, len(snake_case ) ):
total += numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowercase ( snake_case, snake_case, snake_case, snake_case="attention" ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
__magic_name__ :Union[str, Any] = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
__magic_name__ :int = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
__magic_name__ :List[Any] = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def __lowercase ( snake_case, snake_case, snake_case, snake_case=False ):
"""simple docstring"""
if split_mlp_wi:
__magic_name__ :List[str] = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
__magic_name__ :Union[str, Any] = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
__magic_name__ :List[Any] = (wi_a, wi_a)
else:
__magic_name__ :List[Any] = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
__magic_name__ :Dict = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def __lowercase ( snake_case, *, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = traverse_util.flatten_dict(variables['''target'''] )
__magic_name__ :Dict = {'''/'''.join(snake_case ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__magic_name__ :Optional[int] = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''', snake_case )
__magic_name__ :Optional[int] = collections.OrderedDict()
# Shared embeddings.
__magic_name__ :Union[str, Any] = old['''token_embedder/embedding''']
# Encoder.
for i in range(snake_case ):
# Block i, layer 0 (Self Attention).
__magic_name__ :int = tax_layer_norm_lookup(snake_case, snake_case, '''encoder''', '''pre_attention_layer_norm''' )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :Optional[int] = tax_attention_lookup(snake_case, snake_case, '''encoder''', '''attention''' )
__magic_name__ :List[Any] = layer_norm
__magic_name__ :List[Any] = k.T
__magic_name__ :Dict = o.T
__magic_name__ :Union[str, Any] = q.T
__magic_name__ :int = v.T
# Block i, layer 1 (MLP).
__magic_name__ :Union[str, Any] = tax_layer_norm_lookup(snake_case, snake_case, '''encoder''', '''pre_mlp_layer_norm''' )
__magic_name__ , __magic_name__ :List[Any] = tax_mlp_lookup(snake_case, snake_case, '''encoder''', snake_case )
__magic_name__ :Optional[int] = layer_norm
if split_mlp_wi:
__magic_name__ :Optional[int] = wi[0].T
__magic_name__ :Any = wi[1].T
else:
__magic_name__ :str = wi.T
__magic_name__ :Tuple = wo.T
__magic_name__ :List[str] = old[
'''encoder/relpos_bias/rel_embedding'''
].T
__magic_name__ :str = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(snake_case ):
# Block i, layer 0 (Self Attention).
__magic_name__ :List[Any] = tax_layer_norm_lookup(snake_case, snake_case, '''decoder''', '''pre_self_attention_layer_norm''' )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :List[str] = tax_attention_lookup(snake_case, snake_case, '''decoder''', '''self_attention''' )
__magic_name__ :Any = layer_norm
__magic_name__ :List[Any] = k.T
__magic_name__ :Any = o.T
__magic_name__ :List[str] = q.T
__magic_name__ :Optional[int] = v.T
# Block i, layer 1 (Cross Attention).
__magic_name__ :int = tax_layer_norm_lookup(snake_case, snake_case, '''decoder''', '''pre_cross_attention_layer_norm''' )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :Any = tax_attention_lookup(snake_case, snake_case, '''decoder''', '''encoder_decoder_attention''' )
__magic_name__ :Union[str, Any] = layer_norm
__magic_name__ :int = k.T
__magic_name__ :Tuple = o.T
__magic_name__ :int = q.T
__magic_name__ :Union[str, Any] = v.T
# Block i, layer 2 (MLP).
__magic_name__ :Any = tax_layer_norm_lookup(snake_case, snake_case, '''decoder''', '''pre_mlp_layer_norm''' )
__magic_name__ , __magic_name__ :int = tax_mlp_lookup(snake_case, snake_case, '''decoder''', snake_case )
__magic_name__ :Union[str, Any] = layer_norm
if split_mlp_wi:
__magic_name__ :List[str] = wi[0].T
__magic_name__ :Dict = wi[1].T
else:
__magic_name__ :Tuple = wi.T
__magic_name__ :Dict = wo.T
__magic_name__ :Dict = old['''decoder/decoder_norm/scale''']
__magic_name__ :List[str] = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__magic_name__ :Optional[int] = old['''decoder/logits_dense/kernel'''].T
return new
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__magic_name__ :int = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__magic_name__ :Optional[int] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
__magic_name__ :Any = state_dict['''shared.weight''']
return state_dict
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :int = checkpoints.load_tax_checkpoint(snake_case )
__magic_name__ :Dict = convert_tax_to_pytorch(snake_case, num_layers=config.num_layers, is_encoder_only=snake_case )
__magic_name__ :str = make_state_dict(snake_case, snake_case )
model.load_state_dict(snake_case, strict=snake_case )
def __lowercase ( snake_case, snake_case, snake_case, snake_case = False ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TaConfig.from_json_file(snake_case )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__magic_name__ :Optional[Any] = TaEncoderModel(snake_case )
else:
__magic_name__ :Union[str, Any] = TaForConditionalGeneration(snake_case )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case, snake_case, snake_case, snake_case )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(snake_case )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case )
print('''Done''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ):
"""simple docstring"""
__magic_name__ :List[Any] = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , )
__magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :str = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__magic_name__ :Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
        # XXX: apex breaks the trainer if it's run twice, e.g. calling run_seq2seq.main() twice from the same
        # program, and it breaks other tests that run from the same pytest worker. Therefore, until this is
        # sorted out, it must be run only in an external program, i.e. with distributed=True in this
        # test and only under one or more gpus - if we want cpu, we will need to make a special test.
        #
        # More specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call, it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
        # as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]:
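            # Memory metrics must not be skipped, otherwise the train_mem_gpu_* deltas read below would be missing from the logs.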
__magic_name__ :str = '''--skip_memory_metrics 0'''
__magic_name__ :Dict = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
__magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
__magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
__magic_name__ :Any = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig
__magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights that
        # don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
__magic_name__ :Optional[Any] = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
__magic_name__ :Tuple = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__magic_name__ :str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
__magic_name__ :Dict = '''
--do_predict
'''.split()
__magic_name__ :Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__magic_name__ :List[Any] = get_gpu_count()
__magic_name__ :Tuple = get_torch_dist_unique_port()
__magic_name__ :Union[str, Any] = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__magic_name__ :Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
else:
__magic_name__ :List[Any] = ['''run_translation.py'''] + args
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
main()
return output_dir
| 0 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=4_0_0 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=True , __lowerCAmelCase=1 / 2_5_5 , __lowerCAmelCase=True , ):
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__magic_name__ :Any = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
__magic_name__ :Any = parent
__magic_name__ :Optional[int] = batch_size
__magic_name__ :str = num_channels
__magic_name__ :Any = min_resolution
__magic_name__ :Any = max_resolution
__magic_name__ :List[Any] = do_resize
__magic_name__ :Optional[int] = size
__magic_name__ :Any = do_normalize
__magic_name__ :Optional[int] = image_mean
__magic_name__ :str = image_std
__magic_name__ :Optional[Any] = do_rescale
__magic_name__ :str = rescale_factor
__magic_name__ :int = do_pad
def A ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
if not batched:
__magic_name__ :Optional[int] = image_inputs[0]
if isinstance(__lowerCAmelCase , Image.Image ):
__magic_name__ , __magic_name__ :List[str] = image.size
else:
__magic_name__ , __magic_name__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
__magic_name__ :str = int(self.size['''shortest_edge'''] * h / w )
__magic_name__ :List[str] = self.size['''shortest_edge''']
elif w > h:
__magic_name__ :List[Any] = self.size['''shortest_edge''']
__magic_name__ :Optional[Any] = int(self.size['''shortest_edge'''] * w / h )
else:
__magic_name__ :str = self.size['''shortest_edge''']
__magic_name__ :Optional[Any] = self.size['''shortest_edge''']
else:
__magic_name__ :str = []
for image in image_inputs:
__magic_name__ , __magic_name__ :Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ :Tuple = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
__magic_name__ :List[Any] = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = DetaImageProcessor if is_vision_available() else None
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = DetaImageProcessingTester(self )
@property
def A ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
# Initialize image_processing
__magic_name__ :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
__magic_name__ :List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ :str = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ , __magic_name__ :int = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
__magic_name__ :Optional[Any] = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self ):
"""simple docstring"""
# Initialize image_processing
__magic_name__ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
__magic_name__ :Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ :str = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ :str = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ :Optional[int] = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self ):
"""simple docstring"""
# Initialize image_processing
__magic_name__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
__magic_name__ :List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ :Dict = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ :Dict = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
__magic_name__ , __magic_name__ :str = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A ( self ):
"""simple docstring"""
# prepare image and target
__magic_name__ :Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__magic_name__ :List[Any] = json.loads(f.read() )
__magic_name__ :Tuple = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
__magic_name__ :Tuple = DetaImageProcessor()
__magic_name__ :Dict = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
__magic_name__ :Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
__magic_name__ :Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
__magic_name__ :List[str] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
__magic_name__ :List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
__magic_name__ :Union[str, Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
__magic_name__ :Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
__magic_name__ :Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
__magic_name__ :Union[str, Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify orig_size
__magic_name__ :Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
__magic_name__ :List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
@slow
def A ( self ):
"""simple docstring"""
# prepare image, target and masks_path
__magic_name__ :Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__magic_name__ :str = json.loads(f.read() )
__magic_name__ :Optional[int] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
__magic_name__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__magic_name__ :List[str] = DetaImageProcessor(format='''coco_panoptic''' )
__magic_name__ :Optional[int] = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
__magic_name__ :Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
__magic_name__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
__magic_name__ :str = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
__magic_name__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
__magic_name__ :int = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
__magic_name__ :int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
__magic_name__ :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
__magic_name__ :Tuple = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify masks
__magic_name__ :List[str] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
# verify orig_size
__magic_name__ :Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
__magic_name__ :List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
| 0 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowercase ( snake_case = N ):
"""simple docstring"""
__magic_name__ :Optional[int] = -sys.maxsize - 1
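    # Slide a window of 13 adjacent digits across the string and keep the largest product seen.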
for i in range(len(snake_case ) - 1_2 ):
__magic_name__ :List[Any] = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
__magic_name__ :str = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
SCREAMING_SNAKE_CASE__ : Optional[Any] = pytest.mark.integration
SCREAMING_SNAKE_CASE__ : Dict = {"""comet"""}
SCREAMING_SNAKE_CASE__ : Dict = importlib.util.find_spec("""fairseq""") is not None
SCREAMING_SNAKE_CASE__ : Tuple = {"""code_eval"""}
SCREAMING_SNAKE_CASE__ : List[Any] = os.name == """nt"""
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""bertscore""", """frugalscore""", """perplexity"""}
SCREAMING_SNAKE_CASE__ : Dict = importlib.util.find_spec("""transformers""") is not None
def __lowercase ( snake_case ):
"""simple docstring"""
@wraps(snake_case )
def wrapper(self, snake_case ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self, snake_case )
return wrapper
def __lowercase ( snake_case ):
"""simple docstring"""
@wraps(snake_case )
def wrapper(self, snake_case ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self, snake_case )
return wrapper
def __lowercase ( snake_case ):
"""simple docstring"""
@wraps(snake_case )
def wrapper(self, snake_case ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self, snake_case )
return wrapper
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowerCamelCase , lowerCamelCase , lowerCamelCase )
@local
class lowerCamelCase_ ( parameterized.TestCase ):
a__ = {}
a__ = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = '''[...]'''
__magic_name__ :int = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCAmelCase ) ).module_path )
__magic_name__ :Any = datasets.load.import_main_class(metric_module.__name__ , dataset=__lowerCAmelCase )
# check parameters
__magic_name__ :Tuple = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__lowerCAmelCase , metric_module.__name__ ):
with self.use_local_metrics():
try:
__magic_name__ :Any = doctest.testmod(__lowerCAmelCase , verbose=__lowerCAmelCase , raise_on_error=__lowerCAmelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''[...]'''
__magic_name__ :Dict = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCAmelCase ) ).module_path )
# run doctest
with self.use_local_metrics():
__magic_name__ :int = doctest.testmod(__lowerCAmelCase , verbose=__lowerCAmelCase , raise_on_error=__lowerCAmelCase )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__lowerCAmelCase ):
yield
else:
yield
@contextmanager
def A ( self ):
"""simple docstring"""
def load_local_metric(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ):
return load_metric(os.path.join('''metrics''' , __lowerCAmelCase ) , *__lowerCAmelCase , **__lowerCAmelCase )
with patch('''datasets.load_metric''' ) as mock_load_metric:
__magic_name__ :Dict = load_local_metric
yield
@classmethod
def A ( cls , __lowerCAmelCase ):
"""simple docstring"""
def wrapper(__lowerCAmelCase ):
__magic_name__ :Union[str, Any] = contextmanager(__lowerCAmelCase )
__magic_name__ :Dict = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def __lowercase ( snake_case ):
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''', '''''', '''''' ) # handle pytest cli flags
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
assert len(input_dict['''input_ids'''] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
__magic_name__ :List[Any] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def __lowercase ( snake_case ):
"""simple docstring"""
import torch
def bert_cos_score_idf(snake_case, snake_case, *snake_case, **snake_case ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
__magic_name__ :Optional[int] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def __lowercase ( snake_case ):
"""simple docstring"""
def load_from_checkpoint(snake_case ):
class lowerCamelCase_ :
def A ( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
assert len(__lowerCAmelCase ) == 2
__magic_name__ :int = [0.19, 0.92]
return scores, sum(__lowerCAmelCase ) / len(__lowerCAmelCase )
return Model()
    # mock download_model which is supposed to download a bert model
    # mock load_from_checkpoint which is supposed to download a bert model
with patch('''comet.download_model''' ) as mock_download_model:
__magic_name__ :Optional[int] = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
__magic_name__ :Any = load_from_checkpoint
yield
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Optional[int] = load_metric(os.path.join('''metrics''', '''seqeval''' ) )
__magic_name__ :List[str] = '''ERROR'''
__magic_name__ :str = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(snake_case, match=re.escape(snake_case ) ):
metric.compute(predictions=[], references=[], scheme=snake_case )
| 0 |
SCREAMING_SNAKE_CASE__ : Tuple = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __lowercase ( snake_case ):
"""simple docstring"""
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
__magic_name__ :Dict = ''''''
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
__magic_name__ :int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
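# Hedged round-trip illustration (added as comments only): with the mapping above, each
# letter becomes a five-character A/B code, e.g. "hello" encodes to
# "AABBB" + "AABAA" + "ABABA" + "ABABA" + "ABBAB" == "AABBBAABAAABABAABABAABBAB",
# and decoding that string five characters at a time yields "hello" again.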
| 0 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __lowercase ( snake_case, snake_case=False ):
"""simple docstring"""
try:
__magic_name__ :List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__magic_name__ :Dict = default
else:
# KEY is set, convert it to True or False.
try:
__magic_name__ :int = strtobool(snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parse_flag_from_env("""RUN_SLOW""", default=False)
SCREAMING_SNAKE_CASE__ : Dict = parse_flag_from_env("""RUN_REMOTE""", default=False)
SCREAMING_SNAKE_CASE__ : Any = parse_flag_from_env("""RUN_LOCAL""", default=True)
SCREAMING_SNAKE_CASE__ : Dict = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
SCREAMING_SNAKE_CASE__ : Optional[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
SCREAMING_SNAKE_CASE__ : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
SCREAMING_SNAKE_CASE__ : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
SCREAMING_SNAKE_CASE__ : List[str] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
SCREAMING_SNAKE_CASE__ : Dict = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE__ : Tuple = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
__magic_name__ :Optional[Any] = unittest.skip('''test requires faiss''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
__magic_name__ :Union[str, Any] = unittest.skip('''test requires regex''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
__magic_name__ :Optional[int] = unittest.skip('''test requires elasticsearch''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
__magic_name__ :Tuple = unittest.skip('''test requires sqlalchemy''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
__magic_name__ :Optional[int] = unittest.skip('''test requires PyTorch''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.TF_AVAILABLE:
__magic_name__ :Tuple = unittest.skip('''test requires TensorFlow''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
__magic_name__ :Optional[int] = unittest.skip('''test requires JAX''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
__magic_name__ :Tuple = unittest.skip('''test requires Pillow''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
def _require_spacy_model(snake_case ):
try:
import spacy # noqa F401
spacy.load(snake_case )
except ImportError:
return unittest.skip('''test requires spacy''' )(snake_case )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(snake_case ) )(snake_case )
else:
return test_case
return _require_spacy_model
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
__magic_name__ :List[str] = unittest.skip('''test is slow''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
__magic_name__ :Optional[Any] = unittest.skip('''test is local''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
__magic_name__ :List[str] = unittest.skip('''test is packaged''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
__magic_name__ :Any = unittest.skip('''test requires remote''' )(snake_case )
return test_case
def __lowercase ( *snake_case ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(snake_case ) and name.startswith('''test''' ):
for decorator in decorators:
__magic_name__ :List[Any] = decorator(snake_case )
setattr(cls, snake_case, snake_case )
return cls
return decorate
class lowerCamelCase_ ( lowerCamelCase ):
pass
class lowerCamelCase_ ( lowerCamelCase ):
a__ = 0
a__ = 1
a__ = 2
@contextmanager
def __lowercase ( snake_case=OfflineSimulationMode.CONNECTION_FAILS, snake_case=1E-1_6 ):
"""simple docstring"""
__magic_name__ :Optional[Any] = requests.Session().request
def timeout_request(snake_case, snake_case, snake_case, **snake_case ):
# Change the url to an invalid url so that the connection hangs
__magic_name__ :str = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__magic_name__ :Any = timeout
try:
return online_request(snake_case, snake_case, **snake_case )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__magic_name__ :int = url
__magic_name__ :int = e.args[0]
__magic_name__ :Optional[int] = (max_retry_error.args[0].replace('''10.255.255.1''', f'''OfflineMock[{url}]''' ),)
__magic_name__ :List[str] = (max_retry_error,)
raise
def raise_connection_error(snake_case, snake_case, **snake_case ):
raise requests.ConnectionError('''Offline mode is enabled.''', request=snake_case )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''', snake_case ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''', snake_case ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''', snake_case ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
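# Hedged usage sketch (added as comments; written with the helper names this module mirrors):
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         ...  # any requests.Session.send call made here raises a ConnectionError
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#         ...  # requests without an explicit timeout raise RequestWouldHangIndefinitelyError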
@contextmanager
def __lowercase ( *snake_case, **snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*snake_case, **snake_case ) as tmp_dir:
try:
os.chdir(snake_case )
yield
finally:
os.chdir(snake_case )
@contextmanager
def __lowercase ( ):
"""simple docstring"""
import gc
gc.collect()
__magic_name__ :List[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __lowercase ( ):
"""simple docstring"""
import gc
gc.collect()
__magic_name__ :Optional[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
return deepcopy(snake_case ).integers(0, 1_0_0, 1_0 ).tolist() == deepcopy(snake_case ).integers(0, 1_0_0, 1_0 ).tolist()
def __lowercase ( snake_case ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(snake_case, *snake_case, **snake_case ):
try:
return func(*snake_case, **snake_case )
except HTTPError as err:
if str(snake_case ).startswith('''500''' ) or str(snake_case ).startswith('''502''' ):
pytest.xfail(str(snake_case ) )
raise err
return decorator.decorator(_wrapper, snake_case )
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = returncode
__magic_name__ :Any = stdout
__magic_name__ :str = stderr
async def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
while True:
__magic_name__ :Optional[int] = await stream.readline()
if line:
callback(snake_case )
else:
break
async def __lowercase ( snake_case, snake_case=None, snake_case=None, snake_case=None, snake_case=False, snake_case=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''', ''' '''.join(snake_case ) )
__magic_name__ :Optional[int] = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], stdin=snake_case, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=snake_case, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__magic_name__ :Any = []
__magic_name__ :Optional[Any] = []
def tee(snake_case, snake_case, snake_case, snake_case="" ):
__magic_name__ :Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(snake_case )
if not quiet:
print(snake_case, snake_case, file=snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda snake_case : tee(snake_case, snake_case, sys.stdout, label='''stdout:''' ) ),
_read_stream(p.stderr, lambda snake_case : tee(snake_case, snake_case, sys.stderr, label='''stderr:''' ) ),
], timeout=snake_case, )
return _RunOutput(await p.wait(), snake_case, snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=None, snake_case=1_8_0, snake_case=False, snake_case=True ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = asyncio.get_event_loop()
__magic_name__ :List[Any] = loop.run_until_complete(
_stream_subprocess(snake_case, env=snake_case, stdin=snake_case, timeout=snake_case, quiet=snake_case, echo=snake_case ) )
__magic_name__ :str = ''' '''.join(snake_case )
if result.returncode > 0:
__magic_name__ :Tuple = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :str = os.environ.get('''PYTEST_XDIST_WORKER''', '''gw0''' )
__magic_name__ :Union[str, Any] = re.sub(R'''^gw''', '''''', snake_case, 0, re.M )
return int(snake_case )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = 2_9_5_0_0
__magic_name__ :List[Any] = pytest_xdist_worker_id()
return port + uniq_delta
| 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = emb.weight.shape
__magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :str = emb.weight.data
return lin_layer
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__magic_name__ :List[Any] = mam_aaa['''model''']
remove_ignore_keys_(snake_case )
__magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__magic_name__ :List[str] = MaMaaaConfig(
vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
__magic_name__ :int = state_dict['''decoder.embed_tokens.weight''']
__magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case )
model.model.load_state_dict(snake_case, strict=snake_case )
__magic_name__ :List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
    SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = BigBirdConfig.from_json_file(snake_case )
print(f'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
__magic_name__ :List[Any] = BigBirdForQuestionAnswering(snake_case )
else:
__magic_name__ :List[Any] = BigBirdForPreTraining(snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(snake_case, snake_case, is_trivia_qa=snake_case )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def __lowercase ( snake_case, snake_case=False, snake_case=False, snake_case=False ):
"""simple docstring"""
__magic_name__ :List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
__magic_name__ :int = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ :Tuple = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
__magic_name__ :Dict = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ :Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
__magic_name__ :Any = in_proj_bias[: config.hidden_size]
__magic_name__ :Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ :int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ :Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ :Dict = in_proj_bias[-config.hidden_size :]
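# Hedged illustration (added; the helper name is mine): how a fused timm-style qkv weight of
# shape (3 * hidden_size, hidden_size) splits into the query/key/value rows read out above.
def _split_qkv_demo(hidden_size: int = 4):
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), fused)
    return query, key, value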
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = dct.pop(snake_case )
__magic_name__ :Union[str, Any] = val
@torch.no_grad()
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = ViltConfig(image_size=3_8_4, patch_size=3_2, tie_word_embeddings=snake_case )
__magic_name__ :List[str] = False
__magic_name__ :List[str] = False
__magic_name__ :List[Any] = False
__magic_name__ :Union[str, Any] = False
if "vqa" in checkpoint_url:
__magic_name__ :Union[str, Any] = True
__magic_name__ :List[str] = 3_1_2_9
__magic_name__ :List[Any] = '''huggingface/label-files'''
__magic_name__ :List[str] = '''vqa2-id2label.json'''
__magic_name__ :Optional[Any] = json.load(open(hf_hub_download(snake_case, snake_case, repo_type='''dataset''' ), '''r''' ) )
__magic_name__ :List[Any] = {int(snake_case ): v for k, v in idalabel.items()}
__magic_name__ :Dict = idalabel
__magic_name__ :Tuple = {v: k for k, v in idalabel.items()}
__magic_name__ :List[str] = ViltForQuestionAnswering(snake_case )
elif "nlvr" in checkpoint_url:
__magic_name__ :Optional[Any] = True
__magic_name__ :List[Any] = 2
__magic_name__ :Tuple = {0: '''False''', 1: '''True'''}
__magic_name__ :List[Any] = {v: k for k, v in config.idalabel.items()}
__magic_name__ :Any = 3
__magic_name__ :Optional[int] = ViltForImagesAndTextClassification(snake_case )
elif "irtr" in checkpoint_url:
__magic_name__ :List[str] = True
__magic_name__ :Tuple = ViltForImageAndTextRetrieval(snake_case )
elif "mlm_itm" in checkpoint_url:
__magic_name__ :Dict = True
__magic_name__ :List[Any] = ViltForMaskedLM(snake_case )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
__magic_name__ :Optional[Any] = torch.hub.load_state_dict_from_url(snake_case, map_location='''cpu''' )['''state_dict''']
__magic_name__ :str = create_rename_keys(snake_case, snake_case, snake_case, snake_case )
for src, dest in rename_keys:
rename_key(snake_case, snake_case, snake_case )
read_in_q_k_v(snake_case, snake_case )
if mlm_model or irtr_model:
__magic_name__ :int = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
__magic_name__ , __magic_name__ :Union[str, Any] = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(snake_case )
# Define processor
__magic_name__ :Optional[int] = ViltImageProcessor(size=3_8_4 )
__magic_name__ :Tuple = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__magic_name__ :Any = ViltProcessor(snake_case, snake_case )
# Forward pass on example inputs (image + text)
if nlvr_model:
__magic_name__ :Dict = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=snake_case ).raw )
__magic_name__ :Tuple = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=snake_case ).raw )
__magic_name__ :Union[str, Any] = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
__magic_name__ :str = processor(snake_case, snake_case, return_tensors='''pt''' )
__magic_name__ :str = processor(snake_case, snake_case, return_tensors='''pt''' )
__magic_name__ :Union[str, Any] = model(
input_ids=encoding_a.input_ids, pixel_values=encoding_a.pixel_values, pixel_values_a=encoding_a.pixel_values, )
else:
__magic_name__ :Dict = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''', stream=snake_case ).raw )
if mlm_model:
__magic_name__ :int = '''a bunch of [MASK] laying on a [MASK].'''
else:
__magic_name__ :Union[str, Any] = '''How many cats are there?'''
__magic_name__ :Any = processor(snake_case, snake_case, return_tensors='''pt''' )
__magic_name__ :List[str] = model(**snake_case )
# Verify outputs
if mlm_model:
__magic_name__ :Any = torch.Size([1, 1_1, 3_0_5_2_2] )
__magic_name__ :Any = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], snake_case, atol=1E-4 )
# verify masked token prediction equals "cats"
__magic_name__ :List[str] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
__magic_name__ :Union[str, Any] = torch.Size([1, 3_1_2_9] )
__magic_name__ :Union[str, Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3], snake_case, atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], snake_case, atol=1E-4 )
# verify vqa prediction equals "2"
__magic_name__ :Tuple = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
__magic_name__ :List[Any] = torch.Size([1, 2] )
__magic_name__ :Optional[Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3], snake_case, atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''ChineseCLIPImageProcessor'''
a__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
__magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' )
__magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.image_processor
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__magic_name__ :Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.tokenizer.model_input_names
__magic_name__ :Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
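# Hedged usage sketch (added as comments; the processor class and checkpoint name are
# assumptions about the surrounding library, not taken from this file):
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")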
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE__ : int = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ),
}
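# Hedged sketch (added; the helper below is illustrative, not part of the metric): the
# binary-classification formula behind matthews_corrcoef, written out from the confusion
# matrix counts described in the docstring above.
def _binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    return (tp * tn - fp * fn) / denominator if denominator else 0.0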
| 0 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
__magic_name__ :Tuple = s_dict.pop(snake_case )
elif "subsample" in key:
__magic_name__ :Optional[int] = s_dict.pop(snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Optional[int] = emb.weight.shape
__magic_name__ :Optional[Any] = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :List[str] = emb.weight.data
return lin_layer
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :List[Any] = mam_aaa['''args''']
__magic_name__ :List[str] = mam_aaa['''model''']
__magic_name__ :Optional[Any] = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(snake_case )
rename_keys(snake_case )
__magic_name__ :List[Any] = state_dict['''decoder.embed_tokens.weight'''].shape[0]
__magic_name__ :Union[str, Any] = args.share_decoder_input_output_embed
__magic_name__ :Any = [int(snake_case ) for i in args.conv_kernel_sizes.split(''',''' )]
__magic_name__ :List[Any] = SpeechaTextConfig(
vocab_size=snake_case, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', num_conv_layers=len(snake_case ), conv_channels=args.conv_channels, conv_kernel_sizes=snake_case, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=snake_case, num_beams=5, max_length=2_0_0, use_cache=snake_case, decoder_start_token_id=2, early_stopping=snake_case, )
__magic_name__ :str = SpeechaTextForConditionalGeneration(snake_case )
__magic_name__ , __magic_name__ :List[Any] = model.model.load_state_dict(snake_case, strict=snake_case )
if len(snake_case ) > 0 and not set(snake_case ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
__magic_name__ :Tuple = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__magic_name__ :Optional[Any] = lm_head_weights
model.save_pretrained(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 0 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case ):
print(f'''{i}\t\t{d}''' )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = [float('''inf''' )] * vertex_count
__magic_name__ :Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__magic_name__ :Tuple = distance[u] + w
__magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
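# Hedged worked example (added as comments): for edges 0->1 (weight 4), 0->2 (weight 5) and
# 1->2 (weight -3), the shortest distances from vertex 0 are [0.0, 4.0, 1.0]; the relaxation
# loop above lowers the 0->2 distance from 5 to 4 + (-3) = 1 once 0->1 has been settled.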
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Optional[int] = parent
__magic_name__ :List[Any] = 1_3
__magic_name__ :Union[str, Any] = 7
__magic_name__ :Optional[Any] = True
__magic_name__ :Tuple = True
__magic_name__ :List[str] = True
__magic_name__ :List[Any] = True
__magic_name__ :int = 9_9
__magic_name__ :Any = 3_2
__magic_name__ :Union[str, Any] = 2
__magic_name__ :List[str] = 4
__magic_name__ :List[Any] = 3_7
__magic_name__ :Tuple = '''gelu'''
__magic_name__ :Any = 0.1
__magic_name__ :str = 0.1
__magic_name__ :List[str] = 5_1_2
__magic_name__ :int = 1_6
__magic_name__ :Any = 2
__magic_name__ :List[Any] = 0.02
__magic_name__ :Optional[Any] = 3
__magic_name__ :Tuple = 4
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :str = None
if self.use_input_mask:
__magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :str = None
if self.use_token_type_ids:
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ :Union[str, Any] = None
__magic_name__ :Tuple = None
__magic_name__ :str = None
if self.use_labels:
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase )
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ :List[str] = [input_ids, input_mask]
__magic_name__ :Any = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = True
__magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__magic_name__ :Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self.num_labels
__magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.num_choices
__magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ :Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.num_labels
__magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__magic_name__ :List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerModelTester(self )
__magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__magic_name__ :int = 5_0_0_0_0
__magic_name__ :Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.constant([[4, 1_0]] )
__magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
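        # Editor's note (a derivation sketch, not part of the original test): the expected values
        # in the two tests above follow the standard sinusoidal layout, with sin terms in the first
        # half of the embedding dimension and cos terms in the second half, i.e.
        #   emb[m, i]            = sin(m / 10000 ** (2 * i / dim))
        #   emb[m, i + dim // 2] = cos(m / 10000 ** (2 * i / dim))
        # For example, for position m = 1 and dim = 6 the sin block is
        # [sin(1), sin(1 / 10000 ** (1 / 3)), sin(1 / 10000 ** (2 / 3))] ~= [0.8415, 0.0464, 0.0022].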
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
# 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
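        # Editor's note (a verification sketch, not part of the original test): the expected tensors
        # above correspond to the pairwise rotary update applied per position m and adjacent
        # feature pair (x0, x1):
        #   x0' = x0 * cos(m * theta) - x1 * sin(m * theta)
        #   x1' = x1 * cos(m * theta) + x0 * sin(m * theta)
        # e.g. for the query at position 1 and its first pair (0.64, 0.65) with theta = 1:
        #   0.64 * cos(1) - 0.65 * sin(1) ~= -0.2012, matching the second expected row.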
| 0 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''gptj'''
a__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , __lowerCAmelCase=5_0_4_0_0 , __lowerCAmelCase=2_0_4_8 , __lowerCAmelCase=4_0_9_6 , __lowerCAmelCase=2_8 , __lowerCAmelCase=1_6 , __lowerCAmelCase=6_4 , __lowerCAmelCase=None , __lowerCAmelCase="gelu_new" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=1E-5 , __lowerCAmelCase=0.02 , __lowerCAmelCase=True , __lowerCAmelCase=5_0_2_5_6 , __lowerCAmelCase=5_0_2_5_6 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Dict = vocab_size
__magic_name__ :int = n_positions
__magic_name__ :Union[str, Any] = n_embd
__magic_name__ :List[Any] = n_layer
__magic_name__ :Dict = n_head
__magic_name__ :Optional[Any] = n_inner
__magic_name__ :str = rotary_dim
__magic_name__ :int = activation_function
__magic_name__ :Any = resid_pdrop
__magic_name__ :Optional[Any] = embd_pdrop
__magic_name__ :List[Any] = attn_pdrop
__magic_name__ :Dict = layer_norm_epsilon
__magic_name__ :Dict = initializer_range
__magic_name__ :Union[str, Any] = use_cache
__magic_name__ :Tuple = bos_token_id
__magic_name__ :Dict = eos_token_id
super().__init__(
bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase )
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = "default" , __lowerCAmelCase = None , __lowerCAmelCase = False , ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , task=__lowerCAmelCase , patching_specs=__lowerCAmelCase , use_past=__lowerCAmelCase )
if not getattr(self._config , '''pad_token_id''' , __lowerCAmelCase ):
# TODO: how to do that better?
__magic_name__ :Dict = 0
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase , direction='''inputs''' )
__magic_name__ :Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__magic_name__ :List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def A ( self ):
"""simple docstring"""
return self._config.n_layer
@property
def A ( self ):
"""simple docstring"""
return self._config.n_head
def A ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :Any = super(__lowerCAmelCase , self ).generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
        # We need to order the inputs in the way they appear in forward()
__magic_name__ :Optional[int] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__magic_name__ :Optional[Any] = seqlen + 2
__magic_name__ :Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__magic_name__ :str = [
(torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(self.num_layers )
]
__magic_name__ :Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
__magic_name__ :int = ordered_inputs['''attention_mask'''].dtype
__magic_name__ :Optional[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 )
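                # Editor's note (explanatory comment, not in the original): when use_past is set the
                # attention mask has to cover both the cached past positions and the new tokens, so a
                # block of ones with the past length is appended along the sequence dimension.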
return ordered_inputs
@property
def A ( self ):
"""simple docstring"""
return 1_3
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowercase ( snake_case = SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(snake_case ) - 1_2 ):
        product = 1
        for j in range(1_3 ):
            product *= int(snake_case[i + j] )
        if product > largest_product:
            largest_product = product
return largest_product
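# Editor's sketch (illustrative only): the same sliding-window product search can be
# sanity-checked on a short digit string where the answer is easy to verify by hand.
# The helper name and inputs below are assumptions made for demonstration and are not
# part of the original solution.
def _window_product_demo(digits: str = "9989", window: int = 2) -> int:
    best = 0
    for start in range(len(digits) - window + 1):
        product = 1
        for offset in range(window):
            product *= int(digits[start + offset])
        best = max(best, product)
    return best  # for "9989" with a window of 2 this returns 9 * 9 = 81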
if __name__ == "__main__":
print(f"{solution() = }")
| 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
__magic_name__ :Union[str, Any] = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
def __lowercase ( snake_case ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
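# Editor's note (an illustrative mapping, not part of the original script): with the rules
# above, a checkpoint key such as "visual.transformer.resblocks.0.attn.out_proj.weight"
# ends up as "vision_model.encoder.layers.0.self_attn.out_proj.weight".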
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
def __lowercase ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=False ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
    missing_keys , unexpected_keys = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ :Tuple = model(**snake_case )
# Verify outputs
__magic_name__ :Any = outputs.logits_per_video
__magic_name__ :str = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 1 |
from __future__ import annotations
from math import pi, sqrt
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
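# Editor's usage sketch (component values are assumed for illustration): a 10 mH inductor
# with a 100 nF capacitor resonates near 5.03 kHz, since f = 1 / (2 * pi * sqrt(L * C)),
# so the function above would return ('Resonant frequency', 5032.9...) for
# inductance=10e-3 and capacitance=100e-9.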
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = params
__magic_name__ :Any = np.array(__lowerCAmelCase )
__magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
"""simple docstring"""
return len(self.lengths )
def A ( self ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.params.max_model_input_size
__magic_name__ :int = self.lengths > max_len
logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' )
def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ):
return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
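            # e.g. divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]] (illustrative values)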
__magic_name__ :Optional[int] = []
__magic_name__ :List[Any] = []
if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
            cls_id , sep_id = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__magic_name__ :int = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase )
if sub_s[-1] != sep_id:
__magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
assert len(__lowerCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__lowerCAmelCase )
new_tok_ids.extend(__lowerCAmelCase )
new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] )
__magic_name__ :Tuple = np.array(__lowerCAmelCase )
__magic_name__ :Optional[int] = np.array(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = len(self )
__magic_name__ :int = self.lengths > 1_1
__magic_name__ :List[str] = self.token_ids[indices]
__magic_name__ :Union[str, Any] = self.lengths[indices]
__magic_name__ :List[str] = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def A ( self ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
__magic_name__ :Tuple = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Dict = len(self )
__magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__magic_name__ :int = (unk_occs / self.lengths) < 0.5
__magic_name__ :str = self.token_ids[indices]
__magic_name__ :str = self.lengths[indices]
__magic_name__ :Any = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def A ( self ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [t[0] for t in batch]
__magic_name__ :List[Any] = [t[1] for t in batch]
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
# Max for paddings
__magic_name__ :Tuple = max(__lowerCAmelCase )
# Pad token ids
if self.params.mlm:
__magic_name__ :Any = self.params.special_tok_ids['''pad_token''']
else:
__magic_name__ :str = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(__lowerCAmelCase )
assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ )
__magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
__magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs)
return tk_t, lg_t
| 0 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = """▁"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = vocab_file
__magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def A ( self ):
"""simple docstring"""
__magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.__dict__.copy()
__magic_name__ :Optional[Any] = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__magic_name__ :Optional[int] = {}
__magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
__magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase )
return token
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
__magic_name__ :Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
__magic_name__ :Optional[Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ :Optional[int] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
__magic_name__ :Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 0 | 1 |
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if len(snake_case ) != len(snake_case ):
        raise ValueError('''The length of profit and weight must be the same.''' )
    if max_weight <= 0:
        raise ValueError('''max_weight must be greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
    # Compute the profit per unit weight (profit / weight) for each item so the
    # greedy pass below can pick items by value density.
__magic_name__ :List[Any] = [p / w for p, w in zip(snake_case, snake_case )]
# Creating a copy of the list and sorting profit/weight in ascending order
__magic_name__ :Tuple = sorted(snake_case )
# declaring useful variables
__magic_name__ :str = len(snake_case )
__magic_name__ :List[str] = 0
__magic_name__ :List[str] = 0
__magic_name__ :Union[str, Any] = 0
    # Loop while the accumulated weight stays within max_weight (e.g. 15 kg) and items remain (i < length)
while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio, scanning the sorted list from the end
__magic_name__ :List[Any] = sorted_profit_by_weight[length - i - 1]
__magic_name__ :int = profit_by_weight.index(snake_case )
__magic_name__ :Any = -1
        # check whether the item's full weight still fits within the remaining
        # capacity (max_weight - limit)
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Add the item's full profit, since taking all of it means
            # weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit entirely, so take only the remaining capacity
            # and add the proportional profit:
            # (max_weight - limit) / weight[index] * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
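# Editor's worked sketch (numbers assumed for illustration): with profit = [60, 100, 120],
# weight = [10, 20, 30] and max_weight = 50, the greedy pass above takes the first two
# items whole (weight 30, profit 160) plus two thirds of the third item (profit 80),
# so the function returns 240.0 (up to floating-point rounding).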
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = [int(x) for x in input("""Input profits separated by spaces: """).split()]
SCREAMING_SNAKE_CASE__ : Tuple = [int(x) for x in input("""Input weights separated by spaces: """).split()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
__magic_name__ :int = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def A ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__magic_name__ :Union[str, Any] = {}
for i, token in enumerate(__lowerCAmelCase ):
__magic_name__ :Tuple = i
__magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def A ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
__magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
__magic_name__ :Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = ['''的''', '''人''', '''有''']
__magic_name__ :Any = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = True
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = False
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__magic_name__ :Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
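# A minimal, self-contained sketch of greedy WordPiece matching, illustrating why every
# non-initial piece carries the "##" prefix that the Chinese-character test above checks for.
# The tiny vocabulary below is an assumption for illustration only, not the real MobileBERT vocabulary.
TOY_VOCAB = {"un", "##want", "##ed", "runn", "##ing"}

def wordpiece_sketch(word, vocab=TOY_VOCAB):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces get the "##" prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return ["[UNK]"]  # no match for the remaining characters: the whole word is unknown
        pieces.append(cur)
        start = end
    return pieces

# wordpiece_sketch("unwanted") -> ['un', '##want', '##ed']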
| 0 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : str = TypeVar("""T""")
SCREAMING_SNAKE_CASE__ : int = TypeVar("""U""")
class lowerCamelCase_ ( Generic[T, U] ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = key
__magic_name__ :List[str] = val
__magic_name__ :DoubleLinkedListNode[T, U] | None = None
__magic_name__ :DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class lowerCamelCase_ ( Generic[T, U] ):
def __init__( self ):
"""simple docstring"""
__magic_name__ :DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ , __magic_name__ :Union[str, Any] = self.rear, self.head
def __repr__( self ):
"""simple docstring"""
__magic_name__ :Any = ['''DoubleLinkedList''']
__magic_name__ :Any = self.head
while node.next is not None:
rep.append(str(__lowerCAmelCase ) )
__magic_name__ :Optional[int] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__magic_name__ :str = node
__magic_name__ :str = previous
__magic_name__ :Dict = node
__magic_name__ :Optional[Any] = self.rear
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if node.prev is None or node.next is None:
return None
__magic_name__ :str = node.next
__magic_name__ :Any = node.prev
__magic_name__ :int = None
__magic_name__ :List[str] = None
return node
class lowerCamelCase_ ( Generic[T, U] ):
a__ = {}
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :DoubleLinkedList[T, U] = DoubleLinkedList()
__magic_name__ :Dict = capacity
__magic_name__ :Union[str, Any] = 0
__magic_name__ :Dict = 0
__magic_name__ :Optional[Any] = 0
__magic_name__ :dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , __lowerCAmelCase ):
"""simple docstring"""
return key in self.cache
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
__magic_name__ :DoubleLinkedListNode[T, U] = self.cache[key]
__magic_name__ :int = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__lowerCAmelCase )
return node.val
self.miss += 1
return None
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__magic_name__ :Union[str, Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__lowerCAmelCase ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
__magic_name__ :List[Any] = DoubleLinkedListNode(__lowerCAmelCase , __lowerCAmelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__magic_name__ :str = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__magic_name__ :Any = value
self.list.add(__lowerCAmelCase )
@classmethod
def A ( cls , __lowerCAmelCase = 1_2_8 ):
"""simple docstring"""
def cache_decorator_inner(__lowerCAmelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*__lowerCAmelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
__magic_name__ :List[str] = LRUCache(__lowerCAmelCase )
__magic_name__ :Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__magic_name__ :Optional[Any] = func(*__lowerCAmelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , __lowerCAmelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__lowerCAmelCase , '''cache_info''' , __lowerCAmelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
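# A minimal sketch of the same least-recently-used idea using collections.OrderedDict instead
# of an explicit doubly linked list; illustration only, independent of the decorator class above.
from collections import OrderedDict

class SimpleLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def get(self, key):
        if key not in self.store:
            return None
        self.store.move_to_end(key)  # mark as most recently used
        return self.store[key]

    def put(self, key, value) -> None:
        if key in self.store:
            self.store.move_to_end(key)
        self.store[key] = value
        if len(self.store) > self.capacity:
            self.store.popitem(last=False)  # evict the least recently used entry

# cache = SimpleLRU(2); cache.put("a", 1); cache.put("b", 2); cache.get("a"); cache.put("c", 3)
# -> "b" is evicted because it is the least recently used key.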
| 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = eval_examples
__magic_name__ :str = post_process_function
__magic_name__ :int = quant_trainer_args
__magic_name__ :List[str] = 1_2_8 # default number of calibration samples
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
__magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
return DataLoader(
__lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
__magic_name__ :List[str] = self.model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
model.eval()
quant_trainer.enable_calibration(__lowerCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__lowerCAmelCase ):
# Prediction step
__magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :Any = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
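# A minimal sketch of the same dynamic-axes ONNX export applied to a toy model; the model and
# the output file name are illustrative assumptions, not part of the trainer above.
import torch

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(16, 4)

    def forward(self, x):
        return self.linear(x)

toy_model = TinyModel().eval()
example_input = torch.randn(1, 16)  # the batch dimension is marked dynamic below
torch.onnx.export(
    toy_model,
    example_input,
    "tiny_model.onnx",
    export_params=True,
    opset_version=13,
    do_constant_folding=True,
    input_names=["input"],
    output_names=["output"],
    dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
)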
| 0 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
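# A small standalone check of the equivalences these tests rely on: on NumPy inputs the
# framework-agnostic helpers are expected to match the plain NumPy operations.
import numpy as np

x = np.arange(12).reshape(3, 4)
assert np.allclose(x.transpose(), x.T)
assert np.reshape(x, (4, 3)).shape == (4, 3)
assert np.expand_dims(x, axis=1).shape == (3, 1, 4)
assert np.squeeze(np.expand_dims(x, axis=1), axis=1).shape == (3, 4)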
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] )
def __lowercase ( snake_case ):
"""simple docstring"""
if (len(snake_case ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
    return bytes(int(snake_case[i] + snake_case[i + 1], 1_6 ) for i in range(0, len(snake_case ), 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
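# A quick round-trip check of the same base16 transformation using the standard library's
# base64 module, which implements the RFC 3548 uppercase alphabet checked above.
import base64

encoded = base64.b16encode(b"Hello World!")  # b'48656C6C6F20576F726C6421'
decoded = base64.b16decode(encoded)          # b'Hello World!'
assert decoded == b"Hello World!"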
| 0 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = False, False, False
@dataclass
class lowerCamelCase_ :
a__ = None
a__ = True
a__ = True
a__ = None
# Automatically constructed
a__ = "dict"
a__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
a__ = field(default='''Audio''' , init=lowerCamelCase , repr=lowerCamelCase )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__magic_name__ :Tuple = BytesIO()
sf.write(__lowerCAmelCase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__magic_name__ :int = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
__magic_name__ :Optional[int] = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_2_7_6_7
__magic_name__ :Tuple = BytesIO(bytes() )
sf.write(__lowerCAmelCase , __lowerCAmelCase , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
__magic_name__ , __magic_name__ :Optional[int] = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
__magic_name__ :List[Any] = xsplitext(__lowerCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
__magic_name__ :Union[str, Any] = token_per_repo_id or {}
__magic_name__ :str = path.split('''::''' )[-1]
try:
__magic_name__ :str = string_to_dict(__lowerCAmelCase , config.HUB_DATASETS_URL )['''repo_id''']
__magic_name__ :int = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__magic_name__ :int = None
with xopen(__lowerCAmelCase , '''rb''' , use_auth_token=__lowerCAmelCase ) as f:
__magic_name__ , __magic_name__ :List[str] = sf.read(__lowerCAmelCase )
else:
__magic_name__ , __magic_name__ :Union[str, Any] = sf.read(__lowerCAmelCase )
__magic_name__ :Dict = array.T
if self.mono:
__magic_name__ :Tuple = librosa.to_mono(__lowerCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__magic_name__ :Any = librosa.resample(__lowerCAmelCase , orig_sr=__lowerCAmelCase , target_sr=self.sampling_rate )
__magic_name__ :Union[str, Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def A ( self ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
__magic_name__ :Union[str, Any] = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
__magic_name__ :Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__magic_name__ :Tuple = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
__magic_name__ :List[str] = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
__magic_name__ :Dict = pa.array([Audio().encode_example(__lowerCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
__magic_name__ :Tuple = storage.field('''bytes''' )
else:
__magic_name__ :List[Any] = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
__magic_name__ :List[str] = storage.field('''path''' )
else:
__magic_name__ :Tuple = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
__magic_name__ :Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(__lowerCAmelCase ):
with xopen(__lowerCAmelCase , '''rb''' ) as f:
__magic_name__ :Any = f.read()
return bytes_
__magic_name__ :Optional[int] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__magic_name__ :List[str] = pa.array(
[os.path.basename(__lowerCAmelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
__magic_name__ :Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
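# A minimal sketch of the "array -> wav bytes" path used by encode_example above, with a
# synthetic sine wave as an assumed stand-in for real audio data.
import numpy as np
import soundfile as sf
from io import BytesIO

sampling_rate = 16_000
t = np.linspace(0, 1, sampling_rate, endpoint=False)
waveform = 0.1 * np.sin(2 * np.pi * 440 * t)  # one second of a 440 Hz tone

buffer = BytesIO()
sf.write(buffer, waveform, sampling_rate, format="wav")
wav_bytes = buffer.getvalue()  # what would be stored under the "bytes" key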
| 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(snake_case ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(snake_case ):
http_head('''https://huggingface.co''' )
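# A hedged sketch of how a connection failure can be simulated without the datasets helper:
# patch requests.Session.request so that any HTTP call raises ConnectionError. Illustration only.
from unittest.mock import patch
import requests

def check_offline_behaviour():
    with patch("requests.Session.request", side_effect=requests.exceptions.ConnectionError):
        try:
            requests.get("https://huggingface.co")
        except requests.exceptions.ConnectionError:
            return True
    return False

# check_offline_behaviour() -> True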
| 0 | 1 |
from math import sqrt
def sum_of_divisors ( snake_case ):
    """simple docstring"""
    total = 0
    for i in range(1, int(sqrt(snake_case ) + 1 ) ):
        if snake_case % i == 0 and i != sqrt(snake_case ):
            total += i + snake_case // i
        elif i == sqrt(snake_case ):
            total += i
    return total - snake_case
def solution ( snake_case = 1_0_0_0_0 ):
    """simple docstring"""
    total = sum(
        i
        for i in range(1, snake_case )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
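# Worked example (assuming the divisor-sum helper above, named sum_of_divisors at its call
# sites): sum_of_divisors(2_2_0) == 2_8_4 and sum_of_divisors(2_8_4) == 2_2_0, so the classic
# amicable pair 220/284 contributes 504 to solution(1_0_0_0_0).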
| 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime ( snake_case ):
    """simple docstring"""
    if 1 < snake_case < 4:
        # 2 and 3 are primes
        return True
    elif snake_case < 2 or snake_case % 2 == 0 or snake_case % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes larger than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(snake_case ) + 1 ), 6 ):
        if snake_case % i == 0 or snake_case % (i + 2) == 0:
            return False
    return True
def prime_generator ( ):
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( snake_case = 2_0_0_0_0_0_0 ):
    """simple docstring"""
    return sum(takewhile(lambda x : x < snake_case, prime_generator() ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE__ : Tuple = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
SCREAMING_SNAKE_CASE__ : Any = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
SCREAMING_SNAKE_CASE__ : List[Any] = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , ):
"""simple docstring"""
__magic_name__ :Optional[int] = len(references[0] )
if any(len(__lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__magic_name__ :Any = [[refs[i] for refs in references] for i in range(__lowerCAmelCase )]
__magic_name__ :List[str] = TER(
normalized=__lowerCAmelCase , no_punct=__lowerCAmelCase , asian_support=__lowerCAmelCase , case_sensitive=__lowerCAmelCase , )
__magic_name__ :Tuple = sb_ter.corpus_score(__lowerCAmelCase , __lowerCAmelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
| 0 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=0.6 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Tuple = parent
__magic_name__ :List[str] = batch_size
__magic_name__ :Any = image_size
__magic_name__ :List[str] = patch_size
__magic_name__ :Optional[int] = num_channels
__magic_name__ :int = is_training
__magic_name__ :Tuple = use_labels
__magic_name__ :List[Any] = hidden_size
__magic_name__ :Dict = num_hidden_layers
__magic_name__ :Union[str, Any] = num_attention_heads
__magic_name__ :Any = intermediate_size
__magic_name__ :List[Any] = hidden_act
__magic_name__ :Tuple = hidden_dropout_prob
__magic_name__ :str = attention_probs_dropout_prob
__magic_name__ :int = type_sequence_label_size
__magic_name__ :Dict = initializer_range
__magic_name__ :Optional[Any] = mask_ratio
__magic_name__ :Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__magic_name__ :int = (image_size // patch_size) ** 2
__magic_name__ :List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
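        # Worked example with the defaults above (illustration only, not extra test logic):
        # image_size=3_0, patch_size=2 -> num_patches = (30 // 2) ** 2 = 225, and with
        # mask_ratio=0.6 the kept sequence length is ceil(0.4 * (225 + 1)) = ceil(90.4) = 91.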
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ :List[str] = None
if self.use_labels:
__magic_name__ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :Dict = self.get_config()
return config, pixel_values, labels
def A ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = TFViTMAEModel(config=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = TFViTMAEForPreTraining(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase , training=__lowerCAmelCase )
# expected sequence length = num_patches
__magic_name__ :List[str] = (self.image_size // self.patch_size) ** 2
__magic_name__ :str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__magic_name__ :Optional[Any] = 1
__magic_name__ :Union[str, Any] = TFViTMAEForPreTraining(__lowerCAmelCase )
__magic_name__ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , training=__lowerCAmelCase )
__magic_name__ :List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__)) :Tuple = config_and_inputs
__magic_name__ :Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a__ = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = TFViTMAEModelTester(self )
__magic_name__ :Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :Dict = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ :Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Layer ) )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :Optional[int] = model_class(__lowerCAmelCase )
__magic_name__ :Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ :Optional[Any] = [*signature.parameters.keys()]
__magic_name__ :Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Any = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ :int = model_class(__lowerCAmelCase )
__magic_name__ :str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :str = model(__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :str = copy.deepcopy(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
__magic_name__ :Optional[Any] = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :Any = outputs_dict[0].numpy()
__magic_name__ :Any = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def A ( self ):
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :str = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__lowerCAmelCase ):
__magic_name__ :Dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowerCAmelCase ):
__magic_name__ :Optional[int] = v.numpy()
else:
__magic_name__ :Union[str, Any] = np.array(__lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
__magic_name__ :Union[str, Any] = model_class(__lowerCAmelCase )
__magic_name__ :Optional[Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Any = prepare_numpy_arrays(__lowerCAmelCase )
__magic_name__ :Dict = model(__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :Dict = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
__magic_name__ :Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__magic_name__ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ :List[str] = tf.constant(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__magic_name__ :List[Any] = tf_noise
super().check_pt_tf_models(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowerCAmelCase )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__lowerCAmelCase , __lowerCAmelCase ),)
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowerCAmelCase , '''_keras_serializable''' , __lowerCAmelCase )
}
__magic_name__ :Dict = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ :Dict = tf.convert_to_tensor(__lowerCAmelCase )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
__magic_name__ :str = main_layer_class(__lowerCAmelCase )
__magic_name__ :Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__magic_name__ :List[str] = tf.keras.Model(__lowerCAmelCase , outputs=main_layer(__lowerCAmelCase ) )
__magic_name__ :Tuple = model(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ :List[Any] = os.path.join(__lowerCAmelCase , '''keras_model.h5''' )
model.save(__lowerCAmelCase )
__magic_name__ :Dict = tf.keras.models.load_model(
__lowerCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowerCAmelCase , tf.keras.Model )
__magic_name__ :int = model(__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Dict = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ :Tuple = model_class(__lowerCAmelCase )
__magic_name__ :str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = model(__lowerCAmelCase , noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ :Dict = outputs.last_hidden_state.numpy()
__magic_name__ :Any = 0
else:
__magic_name__ :Dict = outputs.logits.numpy()
__magic_name__ :Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase , saved_model=__lowerCAmelCase )
__magic_name__ :List[str] = model_class.from_pretrained(__lowerCAmelCase )
__magic_name__ :int = model(__lowerCAmelCase , noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ :Optional[int] = after_outputs['''last_hidden_state'''].numpy()
__magic_name__ :str = 0
else:
__magic_name__ :str = after_outputs['''logits'''].numpy()
__magic_name__ :Optional[Any] = 0
__magic_name__ :int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase , 1E-5 )
def A ( self ):
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :List[str] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ :Union[str, Any] = model_class(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = model(__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :Tuple = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowerCAmelCase )
__magic_name__ :List[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__magic_name__ :Union[str, Any] = model_class.from_config(model.config )
__magic_name__ :Dict = new_model(__lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
__magic_name__ :Any = new_model(__lowerCAmelCase , noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def A ( self ):
"""simple docstring"""
pass
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__lowerCAmelCase )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def A ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def A ( self ):
"""simple docstring"""
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__magic_name__ :str = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
__magic_name__ :Tuple = self.default_image_processor
__magic_name__ :Dict = prepare_img()
__magic_name__ :Dict = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__magic_name__ :Tuple = ViTMAEConfig()
__magic_name__ :Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__magic_name__ :List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
__magic_name__ :str = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
# verify the logits
__magic_name__ :List[Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__magic_name__ :int = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
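# --- illustrative sketch (independent of the tests above) ---
# The tests pass a fixed per-patch `noise` array so that ViT-MAE's random masking
# becomes reproducible: the model argsorts the noise and keeps the patches with the
# smallest values. This numpy-only helper mimics that selection rule; the function
# name, mask ratio and sizes below are made up for illustration.
import numpy as np

def _sketch_mae_keep_indices(noise, mask_ratio=0.75):
    num_patches = noise.shape[-1]
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise, axis=-1)  # ascending: smallest noise is kept
    return ids_shuffle[..., :len_keep]

_demo_noise = np.random.RandomState(2).uniform(size=(1, 8))
assert _sketch_mae_keep_indices(_demo_noise).shape == (1, 2)  # 25% of 8 patches kept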
| 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
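# --- illustrative sketch (independent of the class above) ---
# The directory cache above is built by registering every sibling file and each of
# its parent directories. This standalone snippet replays the same idea on plain
# strings; the helper name and the example path are made up for illustration.
from pathlib import PurePosixPath

def _sketch_build_dir_cache(filenames):
    cache = {}
    for name in filenames:
        cache[name] = {"name": name, "size": None, "type": "file"}
        for parent in list(PurePosixPath(name).parents)[:-1]:
            cache[str(parent)] = {"name": str(parent), "size": None, "type": "directory"}
    return cache

_demo_cache = _sketch_build_dir_cache(["data/train/part-0.parquet"])
assert _demo_cache["data/train"]["type"] == "directory"
assert _demo_cache["data/train/part-0.parquet"]["type"] == "file"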
| 0 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ : str = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
SCREAMING_SNAKE_CASE__ : List[str] = """▁"""
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__magic_name__ :Any = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
__magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = do_lower_case
__magic_name__ :Any = remove_space
__magic_name__ :List[str] = keep_accents
__magic_name__ :int = vocab_file
__magic_name__ :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
return len(self.sp_model )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.__dict__.copy()
__magic_name__ :Union[str, Any] = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__magic_name__ :Optional[Any] = {}
__magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.remove_space:
__magic_name__ :List[str] = ''' '''.join(inputs.strip().split() )
else:
__magic_name__ :Optional[int] = inputs
__magic_name__ :Dict = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__magic_name__ :str = unicodedata.normalize('''NFKD''' , __lowerCAmelCase )
__magic_name__ :Any = ''''''.join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] )
if self.do_lower_case:
__magic_name__ :Optional[Any] = outputs.lower()
return outputs
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.preprocess_text(__lowerCAmelCase )
__magic_name__ :Dict = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
__magic_name__ :Any = []
for piece in pieces:
if len(__lowerCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__magic_name__ :List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__magic_name__ :str = cur_pieces[1:]
else:
__magic_name__ :List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCAmelCase )
else:
new_pieces.append(__lowerCAmelCase )
return new_pieces
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.PieceToId(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.IdToPiece(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = []
__magic_name__ :List[str] = ''''''
__magic_name__ :List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
__magic_name__ :Union[str, Any] = True
__magic_name__ :Tuple = []
else:
current_sub_tokens.append(__lowerCAmelCase )
__magic_name__ :Optional[int] = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Any = [self.sep_token_id]
__magic_name__ :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = [self.sep_token_id]
__magic_name__ :str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ :int = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
__magic_name__ :int = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
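# --- illustrative sketch (independent of the tokenizer above) ---
# The preprocessing step above collapses whitespace, normalises ``/'' quotes to ",
# strips combining accents unless keep_accents is set, and optionally lowercases.
# This standalone helper replays those steps without needing the SentencePiece
# model file; the function name is made up for illustration.
import unicodedata

def _sketch_preprocess(text, do_lower_case=True, keep_accents=False):
    out = " ".join(text.strip().split())
    out = out.replace("``", '"').replace("''", '"')
    if not keep_accents:
        out = unicodedata.normalize("NFKD", out)
        out = "".join(c for c in out if not unicodedata.combining(c))
    return out.lower() if do_lower_case else out

assert _sketch_preprocess("  Héllo ``wórld''  ") == 'hello "world"'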
| 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
| 0 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''markuplm'''
def __init__( self , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=1_0_2_4 , __lowerCAmelCase=2_1_6 , __lowerCAmelCase=1_0_0_1 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5_0 , __lowerCAmelCase="absolute" , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = vocab_size
__magic_name__ :List[Any] = hidden_size
__magic_name__ :Optional[Any] = num_hidden_layers
__magic_name__ :int = num_attention_heads
__magic_name__ :Optional[Any] = hidden_act
__magic_name__ :Tuple = intermediate_size
__magic_name__ :List[str] = hidden_dropout_prob
__magic_name__ :int = attention_probs_dropout_prob
__magic_name__ :Optional[Any] = max_position_embeddings
__magic_name__ :Dict = type_vocab_size
__magic_name__ :str = initializer_range
__magic_name__ :Optional[int] = layer_norm_eps
__magic_name__ :Optional[int] = position_embedding_type
__magic_name__ :Dict = use_cache
__magic_name__ :Union[str, Any] = classifier_dropout
# additional properties
__magic_name__ :str = max_depth
__magic_name__ :List[Any] = max_xpath_tag_unit_embeddings
__magic_name__ :Any = max_xpath_subs_unit_embeddings
__magic_name__ :Any = tag_pad_id
__magic_name__ :Dict = subs_pad_id
__magic_name__ :int = xpath_unit_hidden_size
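# --- illustrative sketch (independent of the config above) ---
# MarkupLM represents each token's XPath as fixed-length sequences of tag ids and
# subscript ids, truncated or padded to max_depth with tag_pad_id / subs_pad_id
# from the config. The helper below only demonstrates that padding scheme; its
# name, the example ids and the small max_depth are made up for readability.
def _sketch_pad_xpath_units(unit_ids, max_depth, pad_id):
    unit_ids = unit_ids[:max_depth]
    return unit_ids + [pad_id] * (max_depth - len(unit_ids))

# a depth-3 xpath padded to depth 5 with the default tag_pad_id of 216
assert _sketch_pad_xpath_units([12, 7, 31], max_depth=5, pad_id=216) == [12, 7, 31, 216, 216]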
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Dict = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :Optional[Any] = [int(snake_case ) for i in num_string]
__magic_name__ :Dict = 1
for i in range(0, len(snake_case ) ):
total *= numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Union[str, Any] = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :str = [int(snake_case ) for i in num_string]
__magic_name__ :Optional[int] = 0
for i in range(0, len(snake_case ) ):
total += numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
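# --- illustrative sketch (independent of the functions above) ---
# A minimal, self-contained re-implementation of multiplicative persistence for
# sanity-checking the digit-product loop above; the name is made up and the
# classic example 39 -> 27 -> 14 -> 4 takes three steps.
def _sketch_multiplicative_persistence(num: int) -> int:
    steps = 0
    while num >= 10:
        product = 1
        for digit in str(num):
            product *= int(digit)
        num = product
        steps += 1
    return steps

assert _sketch_multiplicative_persistence(39) == 3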
| 0 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(snake_case )
if number < 0:
return False
__magic_name__ :int = number * number
while number > 0:
if number % 1_0 != number_square % 1_0:
return False
number //= 1_0
number_square //= 1_0
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
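# --- illustrative sketch (independent of the function above) ---
# The digit-by-digit loop above tests whether n * n ends with the digits of n
# (an automorphic number). The same check can be written with string slicing;
# this standalone helper with a made-up name is only here for cross-checking.
def _sketch_ends_with_itself(n: int) -> bool:
    if n < 0:
        return False
    return str(n * n).endswith(str(n))

assert _sketch_ends_with_itself(76) is True   # 76 ** 2 = 5776 ends in 76
assert _sketch_ends_with_itself(7) is False   # 7 ** 2 = 49 does not end in 7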
| 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ):
"""simple docstring"""
__magic_name__ :List[Any] = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , )
__magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :str = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__magic_name__ :Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]:
__magic_name__ :str = '''--skip_memory_metrics 0'''
__magic_name__ :Dict = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
__magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
__magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
__magic_name__ :Any = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig
__magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__magic_name__ :Optional[Any] = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
__magic_name__ :Tuple = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__magic_name__ :str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
__magic_name__ :Dict = '''
--do_predict
'''.split()
__magic_name__ :Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__magic_name__ :List[Any] = get_gpu_count()
__magic_name__ :Tuple = get_torch_dist_unique_port()
__magic_name__ :Union[str, Any] = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__magic_name__ :Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
else:
__magic_name__ :List[Any] = ['''run_translation.py'''] + args
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
main()
return output_dir
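# --- illustrative sketch (independent of the test class above) ---
# The ~150MB saving asserted in the bitsandbytes test follows from Adam keeping
# optimizer state for the ~25M quantised parameters in 8 bytes (fp32) versus
# roughly 2 bytes (8-bit); this tiny calculation just restates that comment.
_quantized_params = 25_000_000
_savings_mb = _quantized_params * (8 - 2) / 2**20
assert 140 < _savings_mb < 150  # the test only asserts a conservative 120MB margin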
| 0 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
SCREAMING_SNAKE_CASE__ : Union[str, Any] = namedtuple("""covid_data""", """cases deaths recovered""")
def __lowercase ( snake_case = "https://www.worldometers.info/coronavirus/" ):
"""simple docstring"""
__magic_name__ :Optional[Any] = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(snake_case ).content ).xpath(snake_case ) )
SCREAMING_SNAKE_CASE__ : Any = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
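# --- illustrative sketch (offline, independent of the scraper above) ---
# The scraper assumes three "maincounter-number" spans appear in page order
# (cases, deaths, recovered) and star-unpacks them into a namedtuple. This snippet
# runs the same XPath against a hand-written HTML document, so it needs no network
# access; the helper names and sample numbers are made up.
from collections import namedtuple
from lxml import html as _lxml_html

_covid_row = namedtuple("covid_row", "cases deaths recovered")
_sample_page = """<html><body>
<div class="maincounter-number"><span>1,000</span></div>
<div class="maincounter-number"><span>10</span></div>
<div class="maincounter-number"><span>900</span></div>
</body></html>"""
_values = _lxml_html.fromstring(_sample_page).xpath('//div[@class = "maincounter-number"]/span/text()')
assert _covid_row(*_values) == _covid_row("1,000", "10", "900")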
| 0 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowercase ( snake_case = N ):
"""simple docstring"""
__magic_name__ :Optional[int] = -sys.maxsize - 1
for i in range(len(snake_case ) - 1_2 ):
__magic_name__ :List[Any] = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
__magic_name__ :str = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
def __lowercase ( snake_case = 1_0_0_0_0_0_0 ):
"""simple docstring"""
__magic_name__ :Dict = 1
__magic_name__ :Tuple = 1
__magic_name__ :Any = {1: 1}
for inputa in range(2, snake_case ):
__magic_name__ :Dict = 0
__magic_name__ :Optional[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__magic_name__ :Tuple = (3 * number) + 1
counter += 1
if inputa not in counters:
__magic_name__ :int = counter
if counter > pre_counter:
__magic_name__ :List[Any] = inputa
__magic_name__ :Dict = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
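# --- illustrative sketch (independent of the solution above) ---
# The solution memoises Collatz chain lengths so each number's tail is only walked
# once. This standalone recursive helper shows the same memoisation with made-up
# names and checks the classic value: the chain starting at 27 has 112 terms.
_collatz_cache = {1: 1}

def _sketch_collatz_length(n: int) -> int:
    if n not in _collatz_cache:
        nxt = n // 2 if n % 2 == 0 else 3 * n + 1
        _collatz_cache[n] = 1 + _sketch_collatz_length(nxt)
    return _collatz_cache[n]

assert _sketch_collatz_length(27) == 112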
| 0 |
SCREAMING_SNAKE_CASE__ : Tuple = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __lowercase ( snake_case ):
"""simple docstring"""
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
__magic_name__ :Dict = ''''''
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
__magic_name__ :int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
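# --- illustrative sketch (independent of the functions above) ---
# Bacon's cipher maps every letter to a fixed five-symbol A/B code, so decoding is
# just consuming the coded word five characters at a time. This round-trip uses a
# tiny two-letter table with made-up names; it is not the full table above.
_mini_encode = {"a": "AAAAA", "b": "AAAAB"}
_mini_decode = {v: k for k, v in _mini_encode.items()}

def _sketch_decode(coded: str) -> str:
    return "".join(_mini_decode[coded[i : i + 5]] for i in range(0, len(coded), 5))

_coded = "".join(_mini_encode[c] for c in "ab")
assert _coded == "AAAAAAAAAB"
assert _sketch_decode(_coded) == "ab"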
| 0 | 1 |
def __lowercase ( snake_case, snake_case = False ):
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
__magic_name__ :Optional[int] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
__magic_name__ :List[Any] = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(snake_case, 1 ):
if n < _p:
# then we have our last prime to check
__magic_name__ :int = primes[:idx]
break
__magic_name__ , __magic_name__ :Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__magic_name__ :str = False
for r in range(snake_case ):
__magic_name__ :List[Any] = pow(snake_case, d * 2**r, snake_case )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__magic_name__ :int = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowercase ( ):
"""simple docstring"""
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = emb.weight.shape
__magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :str = emb.weight.data
return lin_layer
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__magic_name__ :List[Any] = mam_aaa['''model''']
remove_ignore_keys_(snake_case )
__magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__magic_name__ :List[str] = MaMaaaConfig(
vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
__magic_name__ :int = state_dict['''decoder.embed_tokens.weight''']
__magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case )
model.model.load_state_dict(snake_case, strict=snake_case )
__magic_name__ :List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
    SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = KandinskyVaaPipeline
a__ = [
'''image_embeds''',
'''negative_image_embeds''',
]
a__ = ['''image_embeds''', '''negative_image_embeds''']
a__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
a__ = False
@property
def A ( self ):
"""simple docstring"""
return 3_2
@property
def A ( self ):
"""simple docstring"""
return 3_2
@property
def A ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A ( self ):
"""simple docstring"""
return 1_0_0
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :str = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__magic_name__ :Optional[int] = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A ( self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :int = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.dummy_unet
__magic_name__ :str = self.dummy_movq
__magic_name__ :Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__lowerCAmelCase , )
__magic_name__ :Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
__magic_name__ :int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
__magic_name__ :Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('''mps''' ):
__magic_name__ :Dict = torch.manual_seed(__lowerCAmelCase )
else:
__magic_name__ :Any = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = '''cpu'''
__magic_name__ :int = self.get_dummy_components()
__magic_name__ :List[Any] = self.pipeline_class(**__lowerCAmelCase )
__magic_name__ :Optional[int] = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__magic_name__ :Optional[int] = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
__magic_name__ :Optional[int] = output.images
__magic_name__ :Optional[int] = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
__magic_name__ :List[Any] = image[0, -3:, -3:, -1]
__magic_name__ :Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__magic_name__ :str = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
__magic_name__ :Dict = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
__magic_name__ :Any = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
__magic_name__ :List[str] = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
__magic_name__ :int = '''red cat, 4k photo'''
__magic_name__ :int = torch.Generator(device='''cuda''' ).manual_seed(0 )
__magic_name__ , __magic_name__ :int = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__magic_name__ :int = torch.Generator(device='''cuda''' ).manual_seed(0 )
__magic_name__ :Optional[Any] = pipeline(
image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=1_0_0 , output_type='''np''' , )
__magic_name__ :Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class lowerCamelCase_ :
a__ = 42
a__ = None
a__ = None
a__ = None
a__ = None
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = _str_to_version_tuple(self.version_str )
def __repr__( self ):
"""simple docstring"""
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def A ( self ):
"""simple docstring"""
return self.major, self.minor, self.patch
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return Version(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return other
raise TypeError(F'''{other} (type {type(__lowerCAmelCase )}) cannot be compared to version.''' )
def __eq__( self , __lowerCAmelCase ):
"""simple docstring"""
try:
__magic_name__ :Union[str, Any] = self._validate_operand(__lowerCAmelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self._validate_operand(__lowerCAmelCase )
return self.tuple < other.tuple
def __hash__( self ):
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def A ( cls , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def A ( self ):
"""simple docstring"""
return self.version_str
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = _VERSION_REG.match(snake_case )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(snake_case ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def __lowercase ( snake_case ):
"""simple docstring"""
return ".".join(str(snake_case ) for v in version_tuple )
| 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''ChineseCLIPImageProcessor'''
a__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
__magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' )
__magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.image_processor
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__magic_name__ :Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.tokenizer.model_input_names
__magic_name__ :Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
| 0 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = eval_examples
__magic_name__ :Tuple = post_process_function
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :str = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Union[str, Any] = self.compute_metrics
__magic_name__ :List[str] = None
__magic_name__ :int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__magic_name__ :Tuple = time.time()
try:
__magic_name__ :Union[str, Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , metric_key_prefix=__lowerCAmelCase , )
finally:
__magic_name__ :Optional[Any] = compute_metrics
__magic_name__ :Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__lowerCAmelCase , __lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__magic_name__ :Dict = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :List[Any] = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
metrics.update(output.metrics )
else:
__magic_name__ :str = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :Tuple = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :List[str] = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__magic_name__ :List[Any] = time.time()
try:
__magic_name__ :Union[str, Any] = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , metric_key_prefix=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
__magic_name__ :str = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__lowerCAmelCase , __lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :List[str] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :str = metrics.pop(__lowerCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
| 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE__ : int = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ),
}
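# Illustrative sketch (separate from the metric above): in the binary case, the value
# returned by sklearn's matthews_corrcoef reduces to the classic confusion-matrix
# formula below. The helper name is hypothetical; sklearn also covers multiclass inputs.
from math import sqrt
def binary_mcc_example(tp, tn, fp, fn):
    denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return 0.0 if denominator == 0 else (tp * tn - fp * fn) / denominator
# binary_mcc_example(tp=6, tn=3, fp=1, fn=2) ~= 0.478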
| 0 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = 3_8_4
__magic_name__ :Tuple = 7
if "tiny" in model_name:
__magic_name__ :Dict = 9_6
__magic_name__ :List[str] = (2, 2, 6, 2)
__magic_name__ :List[str] = (3, 6, 1_2, 2_4)
elif "small" in model_name:
__magic_name__ :Dict = 9_6
__magic_name__ :Dict = (2, 2, 1_8, 2)
__magic_name__ :Any = (3, 6, 1_2, 2_4)
elif "base" in model_name:
__magic_name__ :Any = 1_2_8
__magic_name__ :Dict = (2, 2, 1_8, 2)
__magic_name__ :Optional[int] = (4, 8, 1_6, 3_2)
__magic_name__ :List[Any] = 1_2
__magic_name__ :Union[str, Any] = 5_1_2
elif "large" in model_name:
__magic_name__ :List[str] = 1_9_2
__magic_name__ :int = (2, 2, 1_8, 2)
__magic_name__ :Tuple = (6, 1_2, 2_4, 4_8)
__magic_name__ :Union[str, Any] = 1_2
__magic_name__ :Tuple = 7_6_8
# set label information
__magic_name__ :Tuple = 1_5_0
__magic_name__ :Union[str, Any] = '''huggingface/label-files'''
__magic_name__ :List[Any] = '''ade20k-id2label.json'''
__magic_name__ :Tuple = json.load(open(hf_hub_download(snake_case, snake_case, repo_type='''dataset''' ), '''r''' ) )
__magic_name__ :Tuple = {int(snake_case ): v for k, v in idalabel.items()}
__magic_name__ :Dict = {v: k for k, v in idalabel.items()}
__magic_name__ :Any = SwinConfig(
embed_dim=snake_case, depths=snake_case, num_heads=snake_case, window_size=snake_case, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
__magic_name__ :Any = UperNetConfig(
backbone_config=snake_case, auxiliary_in_channels=snake_case, num_labels=snake_case, idalabel=snake_case, labelaid=snake_case, )
return config
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = dct.pop(snake_case )
__magic_name__ :Optional[int] = val
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__magic_name__ :List[str] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__magic_name__ :Dict = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
__magic_name__ :Dict = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ :Optional[int] = in_proj_weight[:dim, :]
__magic_name__ :Optional[Any] = in_proj_bias[: dim]
__magic_name__ :int = in_proj_weight[
dim : dim * 2, :
]
__magic_name__ :List[str] = in_proj_bias[
dim : dim * 2
]
__magic_name__ :Dict = in_proj_weight[
-dim :, :
]
__magic_name__ :Any = in_proj_bias[-dim :]
# fmt: on
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = x.shape
__magic_name__ :int = x.reshape(snake_case, 4, in_channel // 4 )
__magic_name__ :str = x[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(snake_case, snake_case )
return x
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :List[Any] = x.shape
__magic_name__ :Optional[int] = x.reshape(snake_case, in_channel // 4, 4 )
__magic_name__ :Tuple = x[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(snake_case, snake_case )
return x
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Dict = x.shape[0]
__magic_name__ :Dict = x.reshape(4, in_channel // 4 )
__magic_name__ :List[str] = x[[0, 2, 1, 3], :].transpose(0, 1 ).reshape(snake_case )
return x
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = x.shape[0]
__magic_name__ :Dict = x.reshape(in_channel // 4, 4 )
__magic_name__ :Optional[Any] = x[:, [0, 2, 1, 3]].transpose(0, 1 ).reshape(snake_case )
return x
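# Illustrative note (not part of the conversion itself): with the reshape-to-(rows, 4, cols // 4),
# permute-[0, 2, 1, 3], transpose pattern used by the first of the four reordering helpers above,
# a toy row torch.arange(8).reshape(1, 8) is mapped from [0, 1, 2, 3, 4, 5, 6, 7] to
# [0, 4, 2, 6, 1, 5, 3, 7], i.e. the four patch-merging channel groups are re-interleaved
# so the checkpoint weights match the channel order expected on the other side of the conversion.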
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
__magic_name__ :Dict = model_name_to_url[model_name]
__magic_name__ :Dict = torch.hub.load_state_dict_from_url(snake_case, map_location='''cpu''', file_name=snake_case )[
'''state_dict'''
]
for name, param in state_dict.items():
print(snake_case, param.shape )
__magic_name__ :Any = get_upernet_config(snake_case )
__magic_name__ :str = UperNetForSemanticSegmentation(snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__magic_name__ :Optional[Any] = state_dict.pop(snake_case )
if "bn" in key:
__magic_name__ :Tuple = key.replace('''bn''', '''batch_norm''' )
__magic_name__ :List[str] = val
# rename keys
__magic_name__ :Union[str, Any] = create_rename_keys(snake_case )
for src, dest in rename_keys:
rename_key(snake_case, snake_case, snake_case )
read_in_q_k_v(snake_case, config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__magic_name__ :Dict = reverse_correct_unfold_reduction_order(snake_case )
if "norm" in key:
__magic_name__ :Any = reverse_correct_unfold_norm_order(snake_case )
model.load_state_dict(snake_case )
# verify on image
__magic_name__ :str = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__magic_name__ :Any = Image.open(requests.get(snake_case, stream=snake_case ).raw ).convert('''RGB''' )
__magic_name__ :List[Any] = SegformerImageProcessor()
__magic_name__ :str = processor(snake_case, return_tensors='''pt''' ).pixel_values
with torch.no_grad():
__magic_name__ :List[Any] = model(snake_case )
__magic_name__ :List[Any] = outputs.logits
print(logits.shape )
print('''First values of logits:''', logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__magic_name__ :List[Any] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
__magic_name__ :List[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
__magic_name__ :Dict = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
__magic_name__ :Union[str, Any] = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('''Logits:''', outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f"upernet-swin-{size}" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
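# Hypothetical invocation (the script filename below is an assumption and not stated in this
# file; the flags match the argparse definitions above):
# python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
#     --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub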
| 0 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case ):
print(f'''{i}\t\t{d}''' )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = [float('''inf''' )] * vertex_count
__magic_name__ :Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__magic_name__ :Tuple = distance[u] + w
__magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
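# Illustrative sketch (separate from the interactive script above): one complete Bellman-Ford
# relaxation on a tiny hard-coded graph, using the same {"src", "dst", "weight"} edge layout.
# All names below are example-only and not part of the script.
tiny_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -2},
]
tiny_distance = [float("inf")] * 3
tiny_distance[0] = 0.0
for _ in range(3 - 1):  # relax every edge |V| - 1 times
    for edge in tiny_graph:
        u, v, w = edge["src"], edge["dst"], edge["weight"]
        if tiny_distance[u] != float("inf") and tiny_distance[u] + w < tiny_distance[v]:
            tiny_distance[v] = tiny_distance[u] + w
# tiny_distance is now [0.0, 4.0, 2.0]; vertex 2 is reached more cheaply via 0 -> 1 -> 2 than directly.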
| 0 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase_ ( lowerCamelCase ):
a__ = 42
a__ = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = 5_0 , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :List[Any] = self.unet.config.sample_size
__magic_name__ :Tuple = (batch_size, 3, img_size, img_size)
__magic_name__ :str = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__magic_name__ :Optional[int] = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__magic_name__ :Tuple = self.scheduler.schedule[t]
__magic_name__ :int = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__magic_name__ , __magic_name__ :Dict = self.scheduler.add_noise_to_input(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__magic_name__ :int = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__magic_name__ :List[str] = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__magic_name__ :List[Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__magic_name__ :str = self.scheduler.step_correct(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , step_output.prev_sample , step_output['''derivative'''] , )
__magic_name__ :Dict = step_output.prev_sample
__magic_name__ :Dict = (sample / 2 + 0.5).clamp(0 , 1 )
__magic_name__ :str = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ :List[Any] = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
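# Illustrative note (commented pseudocode, not executable against this file): per timestep the
# scheduler calls above are expected to perform the Karras et al. stochastic sampler update,
# roughly:
#     d = (sample_hat - denoised_hat) / sigma_hat                  # slope at the raised noise level
#     sample_prev = sample_hat + (sigma_prev - sigma_hat) * d      # Euler step
#     if sigma_prev != 0:                                          # Heun-style 2nd-order correction
#         d_prime = (sample_prev - denoised_prev) / sigma_prev
#         sample_prev = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (d + d_prime)
# The exact formulas live inside KarrasVeScheduler; the names here are shorthand only.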
| 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Optional[int] = parent
__magic_name__ :List[Any] = 1_3
__magic_name__ :Union[str, Any] = 7
__magic_name__ :Optional[Any] = True
__magic_name__ :Tuple = True
__magic_name__ :List[str] = True
__magic_name__ :List[Any] = True
__magic_name__ :int = 9_9
__magic_name__ :Any = 3_2
__magic_name__ :Union[str, Any] = 2
__magic_name__ :List[str] = 4
__magic_name__ :List[Any] = 3_7
__magic_name__ :Tuple = '''gelu'''
__magic_name__ :Any = 0.1
__magic_name__ :str = 0.1
__magic_name__ :List[str] = 5_1_2
__magic_name__ :int = 1_6
__magic_name__ :Any = 2
__magic_name__ :List[Any] = 0.02
__magic_name__ :Optional[Any] = 3
__magic_name__ :Tuple = 4
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :str = None
if self.use_input_mask:
__magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :str = None
if self.use_token_type_ids:
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ :Union[str, Any] = None
__magic_name__ :Tuple = None
__magic_name__ :str = None
if self.use_labels:
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase )
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ :List[str] = [input_ids, input_mask]
__magic_name__ :Any = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = True
__magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__magic_name__ :Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self.num_labels
__magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.num_choices
__magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ :Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.num_labels
__magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__magic_name__ :List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) :Union[str, Any] = config_and_inputs
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerModelTester(self )
__magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__magic_name__ :int = 5_0_0_0_0
__magic_name__ :Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.constant([[4, 1_0]] )
__magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
# 2,12,16,64
__magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
__magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
__magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
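# Illustrative sketch (not part of the test above): the rotary update being checked numerically
# rotates each consecutive channel pair (x_even, x_odd) by a position-dependent angle. Plain
# NumPy with hypothetical names; pairing and sign conventions can differ slightly between
# implementations, so treat this as the idea rather than the exact library API.
import numpy as np
def rotate_pairwise_example(x, sin, cos):
    # x: (seq_len, dim) with dim even; sin, cos: (seq_len, dim // 2), one angle per channel pair
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x, dtype=float)
    out[..., 0::2] = x_even * cos - x_odd * sin
    out[..., 1::2] = x_odd * cos + x_even * sin
    return out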
| 0 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase )
class lowerCamelCase_ ( lowerCamelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a__ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
a__ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
a__ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
a__ = "question"
a__ = "context"
a__ = "answers"
@property
def A ( self ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = eval_examples
__magic_name__ :str = post_process_function
__magic_name__ :int = quant_trainer_args
__magic_name__ :List[str] = 1_2_8 # default number of calibration samples
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
__magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
return DataLoader(
__lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
__magic_name__ :List[str] = self.model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
model.eval()
quant_trainer.enable_calibration(__lowerCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__lowerCAmelCase ):
# Prediction step
__magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :Any = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
| 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
__magic_name__ :Union[str, Any] = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
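# get_xclip_config() parses the patch size out of the model name (e.g. "patch32" / "patch14"),
# uses the frame count passed in by the caller, and widens the text/vision towers
# (plus a 768-dim projection) for the "large" variants.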
def __lowercase ( snake_case ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
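# Note on the q/k/v handling above: the original checkpoints store the attention
# input projection as a single stacked "attn.in_proj" tensor, so it is split into
# three equal chunks of size hidden_size (query, key, value) before being assigned
# to the separate projection parameters of the HF model.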
def __lowercase ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=False ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
__magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ :Tuple = model(**snake_case )
# Verify outputs
__magic_name__ :Any = outputs.logits_per_video
__magic_name__ :str = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 1 |
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
_validate_point(snake_case )
_validate_point(snake_case )
if len(snake_case ) != len(snake_case ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(snake_case, snake_case ) ) )
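# Illustrative values (not from the original file): for point_a = [1, 1] and
# point_b = [2, 2] the function above returns |1 - 2| + |1 - 2| = 2.0.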
def __lowercase ( snake_case ):
"""simple docstring"""
if point:
if isinstance(snake_case, snake_case ):
for item in point:
if not isinstance(snake_case, (int, float) ):
__magic_name__ :str = (
'''Expected a list of numbers as input, found '''
f'''{type(snake_case ).__name__}'''
)
raise TypeError(snake_case )
else:
__magic_name__ :List[Any] = f'''Expected a list of numbers as input, found {type(snake_case ).__name__}'''
raise TypeError(snake_case )
else:
raise ValueError('''Missing an input''' )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
_validate_point(snake_case )
_validate_point(snake_case )
if len(snake_case ) != len(snake_case ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(snake_case, snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = params
__magic_name__ :Any = np.array(__lowerCAmelCase )
__magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
"""simple docstring"""
return len(self.lengths )
def A ( self ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.params.max_model_input_size
__magic_name__ :int = self.lengths > max_len
logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' )
def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ):
return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
__magic_name__ :Optional[int] = []
__magic_name__ :List[Any] = []
if self.params.mlm:
__magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
__magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__magic_name__ :int = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase )
if sub_s[-1] != sep_id:
__magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
assert len(__lowerCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__lowerCAmelCase )
new_tok_ids.extend(__lowerCAmelCase )
new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] )
__magic_name__ :Tuple = np.array(__lowerCAmelCase )
__magic_name__ :Optional[int] = np.array(__lowerCAmelCase )
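# remove_long_sequences() splits every sequence longer than max_model_input_size
# into chunks of at most that length, re-adding the cls/sep (or bos/eos) special
# tokens at the boundaries of each chunk.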
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = len(self )
__magic_name__ :int = self.lengths > 1_1
__magic_name__ :List[str] = self.token_ids[indices]
__magic_name__ :Union[str, Any] = self.lengths[indices]
__magic_name__ :List[str] = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def A ( self ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
__magic_name__ :Tuple = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Dict = len(self )
__magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__magic_name__ :int = (unk_occs / self.lengths) < 0.5
__magic_name__ :str = self.token_ids[indices]
__magic_name__ :str = self.lengths[indices]
__magic_name__ :Any = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def A ( self ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [t[0] for t in batch]
__magic_name__ :List[Any] = [t[1] for t in batch]
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
# Max for paddings
__magic_name__ :Tuple = max(__lowerCAmelCase )
# Pad token ids
if self.params.mlm:
__magic_name__ :Any = self.params.special_tok_ids['''pad_token''']
else:
__magic_name__ :str = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(__lowerCAmelCase )
assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ )
__magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
__magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs)
return tk_t, lg_t
| 0 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase_ ( lowerCamelCase ):
a__ = 42
a__ = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = 2_0_0_0 , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :int = self.unet.config.sample_size
__magic_name__ :Optional[Any] = (batch_size, 3, img_size, img_size)
__magic_name__ :Tuple = self.unet
__magic_name__ :Dict = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
__magic_name__ :str = sample.to(self.device )
self.scheduler.set_timesteps(__lowerCAmelCase )
self.scheduler.set_sigmas(__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__magic_name__ :Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__magic_name__ :List[Any] = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
__magic_name__ :Tuple = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# prediction step
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , __lowerCAmelCase ).sample
__magic_name__ :Dict = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
__magic_name__ , __magic_name__ :List[str] = output.prev_sample, output.prev_sample_mean
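# Predictor-corrector sampling: the inner loop above applies config.correct_steps
# corrector updates at the current noise level, and step_pred then advances the
# sample to the next (lower) noise level; prev_sample_mean is the denoised mean
# that is clamped and used as the final image below.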
__magic_name__ :Tuple = sample_mean.clamp(0 , 1 )
__magic_name__ :List[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ :Union[str, Any] = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = """▁"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = vocab_file
__magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def A ( self ):
"""simple docstring"""
__magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.__dict__.copy()
__magic_name__ :Optional[Any] = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__magic_name__ :Optional[int] = {}
__magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
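# The SentencePiece processor cannot be pickled, so __getstate__ drops it and
# __setstate__ rebuilds it here from the saved vocab_file.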
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
__magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase )
return token
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
__magic_name__ :Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
__magic_name__ :Optional[Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ :Optional[int] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
__magic_name__ :Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 0 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''efficientformer'''
def __init__( self , __lowerCAmelCase = [3, 2, 6, 4] , __lowerCAmelCase = [4_8, 9_6, 2_2_4, 4_4_8] , __lowerCAmelCase = [True, True, True, True] , __lowerCAmelCase = 4_4_8 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = 4 , __lowerCAmelCase = 7 , __lowerCAmelCase = 5 , __lowerCAmelCase = 8 , __lowerCAmelCase = 4 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = 2 , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 1E-5 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1E-12 , __lowerCAmelCase = 2_2_4 , __lowerCAmelCase = 1E-05 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
__magic_name__ :Union[str, Any] = hidden_act
__magic_name__ :int = hidden_dropout_prob
__magic_name__ :List[Any] = hidden_sizes
__magic_name__ :Optional[Any] = num_hidden_layers
__magic_name__ :str = num_attention_heads
__magic_name__ :int = initializer_range
__magic_name__ :Optional[Any] = layer_norm_eps
__magic_name__ :Optional[Any] = patch_size
__magic_name__ :int = num_channels
__magic_name__ :Optional[int] = depths
__magic_name__ :List[Any] = mlp_expansion_ratio
__magic_name__ :int = downsamples
__magic_name__ :Dict = dim
__magic_name__ :List[Any] = key_dim
__magic_name__ :List[Any] = attention_ratio
__magic_name__ :str = resolution
__magic_name__ :Optional[int] = pool_size
__magic_name__ :List[Any] = downsample_patch_size
__magic_name__ :int = downsample_stride
__magic_name__ :Any = downsample_pad
__magic_name__ :Optional[Any] = drop_path_rate
__magic_name__ :List[str] = num_metaad_blocks
__magic_name__ :Any = distillation
__magic_name__ :Tuple = use_layer_scale
__magic_name__ :Dict = layer_scale_init_value
__magic_name__ :List[Any] = image_size
__magic_name__ :Optional[Any] = batch_norm_eps
| 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
__magic_name__ :int = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def A ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__magic_name__ :Union[str, Any] = {}
for i, token in enumerate(__lowerCAmelCase ):
__magic_name__ :Tuple = i
__magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
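# 101 and 102 are the [CLS] and [SEP] token ids in the uncased BERT/MobileBERT
# vocabulary, so a single sequence is wrapped as [CLS] ... [SEP] and a pair as
# [CLS] ... [SEP] ... [SEP].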
def A ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
__magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
__magic_name__ :Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = ['''的''', '''人''', '''有''']
__magic_name__ :Any = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = True
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = False
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__magic_name__ :Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 0 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = AutoConfig.from_pretrained(snake_case )
__magic_name__ :Dict = FlaxAutoModelForSeqaSeqLM.from_config(config=snake_case )
__magic_name__ :Any = checkpoints.load_tax_checkpoint(snake_case )
__magic_name__ :List[str] = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
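# T5 v1.1 / LongT5 checkpoints use a gated feed-forward block whose input projection
# is stored as two kernels (wi_0 and wi_1); checking for "wi_0" in the first encoder
# layer detects that layout.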
if config.model_type == "t5":
__magic_name__ :Tuple = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__magic_name__ :Optional[int] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
__magic_name__ :Union[str, Any] = f'''layers_{str(snake_case )}'''
# Self-Attention
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
__magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
__magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
__magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
__magic_name__ :Optional[int] = flax_model.params['''encoder''']['''block'''][str(snake_case )]['''layer''']
__magic_name__ :List[Any] = tax_attention_key
__magic_name__ :List[str] = tax_attention_out
__magic_name__ :Optional[int] = tax_attention_query
__magic_name__ :str = tax_attention_value
__magic_name__ :Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = tax_global_layer_norm
if split_mlp_wi:
__magic_name__ :str = tax_mlp_wi_a
__magic_name__ :Dict = tax_mlp_wi_a
else:
__magic_name__ :Tuple = tax_mlp_wi
__magic_name__ :Optional[int] = tax_mlp_wo
__magic_name__ :Optional[int] = tax_mlp_layer_norm
__magic_name__ :Any = flax_model_encoder_layer_block
# Only for layer 0:
__magic_name__ :Dict = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
__magic_name__ :List[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
__magic_name__ :Dict = tax_encoder_global_rel_embedding
# Assigning
__magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
__magic_name__ :List[str] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__magic_name__ :List[Any] = f'''layers_{str(snake_case )}'''
# Self-Attention
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
__magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
__magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
__magic_name__ :Tuple = tax_enc_dec_attention_module['''key''']['''kernel''']
__magic_name__ :Optional[int] = tax_enc_dec_attention_module['''out''']['''kernel''']
__magic_name__ :List[str] = tax_enc_dec_attention_module['''query''']['''kernel''']
__magic_name__ :Tuple = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
__magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
__magic_name__ :List[str] = flax_model.params['''decoder''']['''block'''][str(snake_case )]['''layer''']
__magic_name__ :Any = tax_attention_key
__magic_name__ :List[str] = tax_attention_out
__magic_name__ :Tuple = tax_attention_query
__magic_name__ :Tuple = tax_attention_value
__magic_name__ :Tuple = tax_pre_attention_layer_norm
__magic_name__ :Optional[Any] = tax_enc_dec_attention_key
__magic_name__ :str = tax_enc_dec_attention_out
__magic_name__ :Union[str, Any] = tax_enc_dec_attention_query
__magic_name__ :Any = tax_enc_dec_attention_value
__magic_name__ :Tuple = tax_cross_layer_norm
if split_mlp_wi:
__magic_name__ :Optional[int] = tax_mlp_wi_a
__magic_name__ :Union[str, Any] = tax_mlp_wi_a
else:
__magic_name__ :Optional[int] = tax_mlp_wi
__magic_name__ :List[str] = tax_mlp_wo
__magic_name__ :int = txa_mlp_layer_norm
__magic_name__ :str = flax_model_decoder_layer_block
# Decoder Normalization
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
__magic_name__ :Tuple = txa_decoder_norm
# Only for layer 0:
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
__magic_name__ :str = tax_decoder_rel_embedding
# Token Embeddings
__magic_name__ :List[Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
__magic_name__ :Any = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__magic_name__ :int = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(snake_case )
print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = eval_examples
__magic_name__ :str = post_process_function
__magic_name__ :int = quant_trainer_args
__magic_name__ :List[str] = 1_2_8 # default number of calibration samples
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
__magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
return DataLoader(
__lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
__magic_name__ :List[str] = self.model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
model.eval()
quant_trainer.enable_calibration(__lowerCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__lowerCAmelCase ):
# Prediction step
__magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :Any = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
| 0 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :str = '''laion/clap-htsat-unfused'''
__magic_name__ :Optional[int] = tempfile.mkdtemp()
def A ( self , **__lowerCAmelCase ):
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def A ( self , **__lowerCAmelCase ):
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :str = self.get_feature_extractor()
__magic_name__ :Tuple = ClapProcessor(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__magic_name__ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ :Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__magic_name__ :Dict = self.get_feature_extractor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
__magic_name__ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.get_feature_extractor()
__magic_name__ :Dict = self.get_tokenizer()
__magic_name__ :Optional[Any] = ClapProcessor(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
__magic_name__ :Any = floats_list((3, 1_0_0_0) )
__magic_name__ :Optional[Any] = feature_extractor(__lowerCAmelCase , return_tensors='''np''' )
__magic_name__ :List[str] = processor(audios=__lowerCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.get_feature_extractor()
__magic_name__ :Tuple = self.get_tokenizer()
__magic_name__ :Optional[int] = ClapProcessor(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
__magic_name__ :List[Any] = '''This is a test string'''
__magic_name__ :Any = processor(text=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.get_feature_extractor()
__magic_name__ :Optional[Any] = self.get_tokenizer()
__magic_name__ :Any = ClapProcessor(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
__magic_name__ :Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ :Optional[Any] = processor.batch_decode(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.get_feature_extractor()
__magic_name__ :List[Any] = self.get_tokenizer()
__magic_name__ :List[Any] = ClapProcessor(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] )
def __lowercase ( snake_case ):
"""simple docstring"""
if (len(snake_case ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1], 1_6 ) for i in range(0, len(snake_case ), 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
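A quick round-trip sanity check of the base16 logic above, written inline so it does not depend on the helper names (which are shared/illustrative in this sample):

# Encode: every byte becomes two uppercase hex digits; decode reverses it.
data = b"Hello World!"
encoded = "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
assert encoded == "48656C6C6F20576F726C6421"
decoded = bytes(int(encoded[i] + encoded[i + 1], 16) for i in range(0, len(encoded), 2))
assert decoded == data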
| 0 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowerCamelCase_ ( unittest.TestCase ):
a__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = TextaTextGenerationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
return generator, ["Something to write", "Something else"]
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = generator('''Something there''' )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
__magic_name__ :Union[str, Any] = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
] , )
__magic_name__ :List[str] = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
] , )
with self.assertRaises(__lowerCAmelCase ):
generator(4 )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
__magic_name__ :Any = generator('''Something there''' , do_sample=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ''''''}] )
__magic_name__ :str = 3
__magic_name__ :Optional[Any] = generator(
'''Something there''' , num_return_sequences=__lowerCAmelCase , num_beams=__lowerCAmelCase , )
__magic_name__ :str = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = generator('''This is a test''' , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
__magic_name__ :str = generator.model.config.eos_token_id
__magic_name__ :Dict = '''<pad>'''
__magic_name__ :List[str] = generator(
['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , )
self.assertEqual(
__lowerCAmelCase , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
__magic_name__ :Union[str, Any] = generator('''Something there''' , do_sample=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ''''''}] )
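A hedged usage sketch of the pipeline exercised in these tests (the tiny random checkpoint is the same one used above, so the generated text is not meaningful):

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))
# -> a list with one {"generated_text": ...} dict, empty for this random checkpoint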
| 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(snake_case ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(snake_case ):
http_head('''https://huggingface.co''' )
| 0 | 1 |
from __future__ import annotations
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = data
__magic_name__ :Node | None = None
__magic_name__ :Node | None = None
def __lowercase ( snake_case ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __lowercase ( snake_case ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0
def __lowercase ( snake_case ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __lowercase ( ): # Main function for testing.
"""simple docstring"""
__magic_name__ :List[str] = Node(1 )
__magic_name__ :str = Node(2 )
__magic_name__ :int = Node(3 )
__magic_name__ :str = Node(4 )
__magic_name__ :List[str] = Node(5 )
__magic_name__ :Dict = Node(6 )
__magic_name__ :List[str] = Node(7 )
__magic_name__ :Tuple = Node(8 )
__magic_name__ :Optional[Any] = Node(9 )
print(is_full_binary_tree(snake_case ) )
print(depth_of_tree(snake_case ) )
print('''Tree is: ''' )
display(snake_case )
if __name__ == "__main__":
main()
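A minimal usage sketch of the tree helpers above (the names Node, depth_of_tree and is_full_binary_tree are illustrative):

root = Node(1)
root.left, root.right = Node(2), Node(3)
root.left.left, root.left.right = Node(4), Node(5)
print(depth_of_tree(root))        # 3: the longest root-to-leaf path is 1 -> 2 -> 4
print(is_full_binary_tree(root))  # True: every node has either 0 or 2 children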
| 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def __lowercase ( snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(snake_case ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :str = 2
while True:
if is_prime(snake_case ):
yield num
num += 1
def __lowercase ( snake_case = 2_0_0_0_0_0_0 ):
"""simple docstring"""
    return sum(takewhile(lambda x : x < snake_case, prime_generator() ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Tuple = parent
__magic_name__ :Dict = batch_size
__magic_name__ :Optional[int] = image_size
__magic_name__ :Optional[Any] = patch_size
__magic_name__ :int = num_channels
__magic_name__ :Union[str, Any] = is_training
__magic_name__ :Optional[Any] = use_labels
__magic_name__ :str = hidden_size
__magic_name__ :Optional[int] = num_hidden_layers
__magic_name__ :Optional[Any] = num_attention_heads
__magic_name__ :Optional[Any] = intermediate_size
__magic_name__ :str = hidden_act
__magic_name__ :Tuple = hidden_dropout_prob
__magic_name__ :Optional[int] = attention_probs_dropout_prob
__magic_name__ :int = type_sequence_label_size
__magic_name__ :Optional[int] = initializer_range
__magic_name__ :Any = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ :Tuple = (image_size // patch_size) ** 2
__magic_name__ :int = num_patches + 1
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ :List[Any] = None
if self.use_labels:
__magic_name__ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :str = self.get_config()
return config, pixel_values, labels
def A ( self ):
"""simple docstring"""
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = ViTMSNModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = self.type_sequence_label_size
__magic_name__ :List[str] = ViTMSNForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        print(F'''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
        print(F'''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ :Optional[int] = 1
__magic_name__ :Any = ViTMSNForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ :Union[str, Any] = config_and_inputs
__magic_name__ :int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
a__ = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = ViTMSNModelTester(self )
__magic_name__ :Any = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :int = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ :Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :Union[str, Any] = model_class(__lowerCAmelCase )
__magic_name__ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ :List[str] = [*signature.parameters.keys()]
__magic_name__ :str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ :str = ViTMSNModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def A ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def A ( self ):
"""simple docstring"""
torch.manual_seed(2 )
__magic_name__ :int = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__lowerCAmelCase )
__magic_name__ :Any = self.default_image_processor
__magic_name__ :Dict = prepare_img()
__magic_name__ :Any = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
__magic_name__ :Tuple = model(**__lowerCAmelCase )
# verify the logits
__magic_name__ :Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__magic_name__ :Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
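A short usage sketch of the flatten_dict convention these tests pin down (dotted keys, one entry per leaf value):

from transformers.utils import flatten_dict

nested = {"task_specific_params": {"summarization": {"num_beams": 4}}}
assert flatten_dict(nested) == {"task_specific_params.summarization.num_beams": 4}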
| 0 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=lowerCamelCase ):
a__ = ['''note_seq''']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise TypeError('''only integers accepted as input''' )
else:
__magic_name__ :List[Any] = str(abs(snake_case ) )
__magic_name__ :Dict = [list(snake_case ) for char in range(len(snake_case ) )]
for index in range(len(snake_case ) ):
num_transpositions[index].pop(snake_case )
return max(
int(''''''.join(list(snake_case ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
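A minimal round-trip sketch of the parquet I/O exercised above (the file path is illustrative; to_parquet / from_parquet are the convenience wrappers around the writer and reader classes used in these tests):

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_parquet("example.parquet")                    # writes a parquet file
reloaded = Dataset.from_parquet("example.parquet")  # reads it back
assert reloaded.column_names == ["col_1", "col_2", "col_3"]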
| 0 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
SCREAMING_SNAKE_CASE__ : List[Any] = get_logger(__name__)
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case=0 ):
"""simple docstring"""
os.makedirs(snake_case, exist_ok=snake_case )
with FSDP.state_dict_type(
snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
__magic_name__ :Tuple = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__magic_name__ :List[Any] = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
__magic_name__ :int = os.path.join(snake_case, snake_case )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(snake_case, snake_case )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__magic_name__ :Optional[int] = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__magic_name__ :List[Any] = os.path.join(snake_case, snake_case )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(snake_case, snake_case )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__magic_name__ :Optional[Any] = os.path.join(snake_case, f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(snake_case, exist_ok=snake_case )
logger.info(f'''Saving model to {ckpt_dir}''' )
__magic_name__ :List[Any] = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=snake_case, storage_writer=dist_cp.FileSystemWriter(snake_case ), planner=DefaultSavePlanner(), )
logger.info(f'''Model saved to {ckpt_dir}''' )
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
__magic_name__ :List[Any] = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
__magic_name__ :Any = os.path.join(snake_case, snake_case )
logger.info(f'''Loading model from {input_model_file}''' )
__magic_name__ :Any = torch.load(snake_case )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__magic_name__ :Union[str, Any] = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__magic_name__ :Tuple = os.path.join(snake_case, snake_case )
logger.info(f'''Loading model from {input_model_file}''' )
__magic_name__ :List[Any] = torch.load(snake_case )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__magic_name__ :Tuple = (
os.path.join(snake_case, f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
__magic_name__ :Any = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case, storage_reader=dist_cp.FileSystemReader(snake_case ), planner=DefaultLoadPlanner(), )
__magic_name__ :Any = state_dict['''model''']
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(snake_case )
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case, snake_case=0 ):
"""simple docstring"""
os.makedirs(snake_case, exist_ok=snake_case )
with FSDP.state_dict_type(
snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
__magic_name__ :List[Any] = FSDP.optim_state_dict(snake_case, snake_case )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__magic_name__ :Optional[int] = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__magic_name__ :str = os.path.join(snake_case, snake_case )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(snake_case, snake_case )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
__magic_name__ :Tuple = os.path.join(snake_case, f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(snake_case, exist_ok=snake_case )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(snake_case ), planner=DefaultSavePlanner(), )
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case, snake_case=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__magic_name__ :Tuple = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__magic_name__ :Optional[Any] = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__magic_name__ :str = os.path.join(snake_case, snake_case )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
__magic_name__ :Optional[int] = torch.load(snake_case )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__magic_name__ :Tuple = (
os.path.join(snake_case, f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
__magic_name__ :List[str] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(snake_case ), )
__magic_name__ :str = optim_state['''optimizer''']
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
__magic_name__ :Tuple = FSDP.optim_state_dict_to_load(snake_case, snake_case, snake_case )
optimizer.load_state_dict(snake_case )
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Dict = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :Optional[Any] = [int(snake_case ) for i in num_string]
__magic_name__ :Dict = 1
for i in range(0, len(snake_case ) ):
total *= numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Union[str, Any] = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :str = [int(snake_case ) for i in num_string]
__magic_name__ :Optional[int] = 0
for i in range(0, len(snake_case ) ):
total += numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
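A worked example of the two persistence measures above (function names are illustrative, matching the error messages in the sample):

# 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, so multiplicative persistence is 3.
# 39 -> 3+9 = 12 -> 1+2 = 3,             so additive persistence is 2.
assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2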
| 0 | 1 |
import math
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(snake_case )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('''This should never happen''' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
SCREAMING_SNAKE_CASE__ : Optional[Any] = """Enter the base and the power separated by a comma: """
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = map(int, input(prompt).split(""","""))
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
SCREAMING_SNAKE_CASE__ : List[Any] = res(xa, ya)
SCREAMING_SNAKE_CASE__ : Dict = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ):
"""simple docstring"""
__magic_name__ :List[Any] = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , )
__magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :str = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__magic_name__ :Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
        # Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
        # via a 2nd main() call it botches the subsequent eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]:
__magic_name__ :str = '''--skip_memory_metrics 0'''
__magic_name__ :Dict = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
__magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
__magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
__magic_name__ :Any = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig
__magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__magic_name__ :Optional[Any] = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
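        # Assemble the CLI arguments for examples/pytorch/translation/run_translation.py;
        # the train / eval / predict argument blocks below are built separately and only
        # concatenated when the corresponding do_train / do_eval / do_predict flag is set.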
__magic_name__ :Tuple = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__magic_name__ :str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
            --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
__magic_name__ :Dict = '''
--do_predict
'''.split()
__magic_name__ :Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__magic_name__ :List[Any] = get_gpu_count()
__magic_name__ :Tuple = get_torch_dist_unique_port()
__magic_name__ :Union[str, Any] = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__magic_name__ :Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
else:
__magic_name__ :List[Any] = ['''run_translation.py'''] + args
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
main()
return output_dir
| 0 | 1 |
from math import pi
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
return 2 * pi * radius * (angle / 3_6_0)
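# For example, a 90 degree arc of a circle of radius 10 spans a quarter of the full
# circumference: 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.71 (matching the call below).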
if __name__ == "__main__":
print(arc_length(90, 10))
| 0 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowercase ( snake_case = N ):
"""simple docstring"""
__magic_name__ :Optional[int] = -sys.maxsize - 1
for i in range(len(snake_case ) - 1_2 ):
__magic_name__ :List[Any] = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
__magic_name__ :str = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case = None, snake_case = None ):
"""simple docstring"""
if start is None:
__magic_name__ :int = 0
if end is None:
__magic_name__ :Optional[Any] = len(snake_case ) - 1
if start >= end:
return
__magic_name__ :Tuple = (start + end) // 2
slowsort(snake_case, snake_case, snake_case )
slowsort(snake_case, mid + 1, snake_case )
if sequence[end] < sequence[mid]:
__magic_name__ , __magic_name__ :List[Any] = sequence[mid], sequence[end]
slowsort(snake_case, snake_case, end - 1 )
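# Intended usage (illustrative; the algorithm sorts the list in place):
# data = [7, 3, 9, 1]; slowsort(data) would leave data as [1, 3, 7, 9].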
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 |
SCREAMING_SNAKE_CASE__ : Tuple = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __lowercase ( snake_case ):
"""simple docstring"""
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
__magic_name__ :Dict = ''''''
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
__magic_name__ :int = word[5:]
decoded += " "
return decoded.strip()
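# Round-trip example based on the mapping table above (illustrative only):
# "hello" encodes to "AABBB AABAA ABABA ABABA ABBAB" -> "AABBBAABAAABABAABABAABBAB",
# and splitting that string back into 5-character groups recovers "hello".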
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |
import requests
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Dict = {'''Content-Type''': '''application/json'''}
__magic_name__ :Union[str, Any] = requests.post(snake_case, json={'''text''': message_body}, headers=snake_case )
if response.status_code != 2_0_0:
__magic_name__ :Any = (
'''Request to slack returned an error '''
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(snake_case )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = emb.weight.shape
__magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :str = emb.weight.data
return lin_layer
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__magic_name__ :List[Any] = mam_aaa['''model''']
remove_ignore_keys_(snake_case )
__magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__magic_name__ :List[str] = MaMaaaConfig(
vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
__magic_name__ :int = state_dict['''decoder.embed_tokens.weight''']
__magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case )
model.model.load_state_dict(snake_case, strict=snake_case )
__magic_name__ :List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
    SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = CanineTokenizer
a__ = False
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Any = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self ):
"""simple docstring"""
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def A ( self , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
__magic_name__ :List[Any] = 1_0_2_4
return tokenizer
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.canine_tokenizer
__magic_name__ :int = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
__magic_name__ :Tuple = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
__magic_name__ :int = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Union[str, Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.canine_tokenizer
        __magic_name__ :Optional[int] = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
__magic_name__ :Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , __lowerCAmelCase )
self.assertIn('''attention_mask''' , __lowerCAmelCase )
self.assertIn('''token_type_ids''' , __lowerCAmelCase )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.canine_tokenizer
__magic_name__ :Dict = [
            '''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
__magic_name__ :Tuple = tokenizer(
text_target=__lowerCAmelCase , max_length=3_2 , padding='''max_length''' , truncation=__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def A ( self ):
"""simple docstring"""
# safety check on max_len default value so we are sure the test works
__magic_name__ :Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__magic_name__ :Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ :List[Any] = tempfile.mkdtemp()
__magic_name__ :Optional[int] = ''' He is very happy, UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
__magic_name__ :List[Any] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
__magic_name__ :int = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ :Any = tempfile.mkdtemp()
__magic_name__ :List[Any] = ''' He is very happy, UNwant\u00E9d,running'''
__magic_name__ :Union[str, Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__magic_name__ :Optional[Any] = chr(0XE007 )
additional_special_tokens.append(__lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__magic_name__ :str = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
__magic_name__ :Optional[int] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn(__lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__magic_name__ :Any = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ , __magic_name__ :Any = self.get_clean_sequence(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
__magic_name__ :str = 0XE005
__magic_name__ :Dict = chr(__lowerCAmelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
__magic_name__ :Tuple = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCAmelCase )
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , input_encoded + special_token_id )
__magic_name__ :List[str] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ :Dict = chr(0XE005 )
__magic_name__ :Optional[Any] = chr(0XE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
__magic_name__ :Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer.tokenize(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCAmelCase )
self.assertEqual(token_a[0] , __lowerCAmelCase )
@require_tokenizers
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__magic_name__ :int = 0XE006
__magic_name__ :Dict = chr(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCAmelCase )
tokenizer.from_pretrained(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ :Union[str, Any] = json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ :Union[str, Any] = json.load(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
__magic_name__ :int = 0XE006
__magic_name__ :Union[str, Any] = chr(__lowerCAmelCase )
__magic_name__ :Optional[int] = [new_token_a]
__magic_name__ :List[str] = [new_token_a]
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ :Optional[Any] = tokenizer_class.from_pretrained(__lowerCAmelCase , extra_ids=0 )
self.assertIn(__lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__magic_name__ :Union[str, Any] = 0XE007
__magic_name__ :Tuple = chr(__lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ :Dict = [AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase )]
__magic_name__ :Optional[int] = tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , extra_ids=0 )
self.assertIn(__lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ :Optional[int] = '''hello world'''
if self.space_between_special_tokens:
__magic_name__ :Optional[Any] = '''[CLS] hello world [SEP]'''
else:
__magic_name__ :Dict = input
__magic_name__ :List[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.decode(__lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCAmelCase , [output, output.lower()] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ :str = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__magic_name__ :Tuple = '''a'''
__magic_name__ :str = ord(__lowerCAmelCase )
for attr in attributes_list:
setattr(__lowerCAmelCase , attr + '''_id''' , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + '''_id''' ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , attr + '''_id''' , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + '''_id''' ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens_ids''' ) , [] )
__magic_name__ :str = 0XE006
__magic_name__ :Optional[Any] = chr(__lowerCAmelCase )
setattr(__lowerCAmelCase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCAmelCase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
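# The heavy torch model classes are only registered in the import structure when
# torch is available; at runtime the module is swapped for a _LazyModule so the
# actual imports are deferred until first attribute access.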
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :str = parent
__magic_name__ :Dict = batch_size
__magic_name__ :List[Any] = seq_length
__magic_name__ :int = is_training
__magic_name__ :Optional[int] = use_input_mask
__magic_name__ :Tuple = use_token_type_ids
__magic_name__ :Optional[Any] = use_labels
__magic_name__ :Tuple = vocab_size
__magic_name__ :List[str] = hidden_size
__magic_name__ :str = num_hidden_layers
__magic_name__ :int = num_attention_heads
__magic_name__ :Tuple = intermediate_multiple_size
__magic_name__ :int = hidden_act
__magic_name__ :Optional[int] = hidden_dropout
__magic_name__ :Optional[Any] = attention_dropout
__magic_name__ :Optional[Any] = weight_tying
__magic_name__ :str = max_position_embeddings
__magic_name__ :Any = type_vocab_size
__magic_name__ :Optional[int] = type_sequence_label_size
__magic_name__ :List[str] = initializer_range
__magic_name__ :Dict = num_labels
__magic_name__ :Optional[Any] = num_choices
__magic_name__ :List[Any] = scope
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :Optional[Any] = None
if self.use_input_mask:
__magic_name__ :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :List[str] = None
if self.use_labels:
__magic_name__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def A ( self ):
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :List[Any] = self.prepare_config_and_inputs()
__magic_name__ :Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = GPTNeoXJapaneseModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = True
__magic_name__ :Union[str, Any] = GPTNeoXJapaneseModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = GPTNeoXJapaneseForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = True
__magic_name__ :Dict = GPTNeoXJapaneseForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
__magic_name__ :List[str] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
__magic_name__ :Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__magic_name__ :str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__magic_name__ :List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
__magic_name__ :Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__magic_name__ :Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
__magic_name__ :int = output_from_no_past['''hidden_states'''][0]
__magic_name__ :Dict = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
# select random slice
__magic_name__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__magic_name__ :Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__magic_name__ :Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :Optional[int] = config_and_inputs
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
a__ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
a__ = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :int = GPTNeoXJapaneseModelTester(self )
__magic_name__ :Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
# This regression test was failing with PyTorch < 1.3
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :int = self.model_tester.prepare_config_and_inputs_for_decoder()
__magic_name__ :List[str] = None
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = '''abeja/gpt-neox-japanese-2.7b'''
__magic_name__ :Union[str, Any] = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
__magic_name__ :Union[str, Any] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
__magic_name__ :Optional[int] = GPTNeoXJapaneseTokenizer.from_pretrained(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__lowerCAmelCase )
__magic_name__ :Optional[Any] = []
for prompt in prompts:
__magic_name__ :Optional[int] = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ).input_ids
__magic_name__ :List[Any] = model.generate(__lowerCAmelCase , max_length=5_0 )
__magic_name__ :Dict = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''ChineseCLIPImageProcessor'''
a__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
__magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' )
__magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.image_processor
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__magic_name__ :Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.tokenizer.model_input_names
__magic_name__ :Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
| 0 | 1 |
import numpy as np
def __lowercase ( snake_case ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def __lowercase ( snake_case ):
"""simple docstring"""
return vector * sigmoid(1.702 * vector )
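# Quick sanity values for the two activations above (illustrative only):
# sigmoid(0) = 0.5, and the SiLU-style approximation gives 0 * sigmoid(1.702 * 0) = 0,
# while large positive inputs pass through almost unchanged.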
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE__ : int = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ),
}
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__ : List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case ):
print(f'''{i}\t\t{d}''' )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = [float('''inf''' )] * vertex_count
__magic_name__ :Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__magic_name__ :Tuple = distance[u] + w
__magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
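# Intended behaviour on a tiny graph (illustrative only): with edges
# 0->1 (weight 2), 1->2 (weight 3) and 0->2 (weight 10), the shortest distances
# from source 0 are [0.0, 2.0, 5.0] because the path through vertex 1 is cheaper.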
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 1 |
import os
def __lowercase ( ):
"""simple docstring"""
    with open(os.path.dirname(__file__) + '''/grid.txt''' ) as f:
__magic_name__ :int = [] # noqa: E741
for _ in range(2_0 ):
l.append([int(snake_case ) for x in f.readline().split()] )
__magic_name__ :Union[str, Any] = 0
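    # Scan every horizontal, vertical and diagonal run of four adjacent entries in the
    # 20x20 grid and keep the greatest product found.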
# right
for i in range(2_0 ):
for j in range(1_7 ):
__magic_name__ :Any = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
__magic_name__ :Optional[Any] = temp
# down
for i in range(1_7 ):
for j in range(2_0 ):
__magic_name__ :Optional[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
__magic_name__ :Optional[int] = temp
# diagonal 1
for i in range(1_7 ):
for j in range(1_7 ):
__magic_name__ :int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
__magic_name__ :Optional[Any] = temp
# diagonal 2
for i in range(1_7 ):
for j in range(3, 2_0 ):
__magic_name__ :int = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
__magic_name__ :List[str] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Optional[int] = parent
__magic_name__ :List[Any] = 1_3
__magic_name__ :Union[str, Any] = 7
__magic_name__ :Optional[Any] = True
__magic_name__ :Tuple = True
__magic_name__ :List[str] = True
__magic_name__ :List[Any] = True
__magic_name__ :int = 9_9
__magic_name__ :Any = 3_2
__magic_name__ :Union[str, Any] = 2
__magic_name__ :List[str] = 4
__magic_name__ :List[Any] = 3_7
__magic_name__ :Tuple = '''gelu'''
__magic_name__ :Any = 0.1
__magic_name__ :str = 0.1
__magic_name__ :List[str] = 5_1_2
__magic_name__ :int = 1_6
__magic_name__ :Any = 2
__magic_name__ :List[Any] = 0.02
__magic_name__ :Optional[Any] = 3
__magic_name__ :Tuple = 4
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :str = None
if self.use_input_mask:
__magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :str = None
if self.use_token_type_ids:
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ :Union[str, Any] = None
__magic_name__ :Tuple = None
__magic_name__ :str = None
if self.use_labels:
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase )
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ :List[str] = [input_ids, input_mask]
__magic_name__ :Any = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = True
__magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__magic_name__ :Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self.num_labels
__magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.num_choices
__magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ :Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.num_labels
__magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__magic_name__ :List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self ):
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerModelTester(self )
__magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__magic_name__ :int = 5_0_0_0_0
__magic_name__ :Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.constant([[4, 1_0]] )
__magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
        # query/key tensors of shape (batch=2, heads=12, seq_len=16, head_dim=64)
__magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
__magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
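        # apply_rotary_position_embeddings rotates the query/key tensors with the sinusoidal
        # table; the constants below are the expected first 6 positions x 8 dims after rotation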
__magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : str = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case, ):
"""simple docstring"""
__magic_name__ :Optional[int] = len(snake_case )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all valid queen placements in this row
for col in range(snake_case ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective collision sets (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is true it means there is a collision, so we continue
        # to the next column in the for loop.
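        # For example, queens at (row=0, col=1) and (row=2, col=3) share the 45º diagonal
        # because 0 - 1 == 2 - 3 == -1, so the second placement would be rejected here.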
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call depth_first_search again with the updated inputs
depth_first_search(
[*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], snake_case, snake_case, )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :list[list[str]] = []
depth_first_search([], [], [], snake_case, snake_case )
# Print all the boards
for board in boards:
for column in board:
print(snake_case )
print('''''' )
print(len(snake_case ), '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
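    # e.g. "xclip-base-patch32" -> patch_size = 32, "xclip-large-patch14" -> patch_size = 14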
__magic_name__ :Union[str, Any] = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
def __lowercase ( snake_case ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
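            # checkpoints store query/key/value as a single fused in_proj tensor; split it
            # into the separate q/k/v weights and biases expected by the HF model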
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
def __lowercase ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=False ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
__magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ :Tuple = model(**snake_case )
# Verify outputs
__magic_name__ :Any = outputs.logits_per_video
__magic_name__ :str = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pytest.mark.integration
@pytest.mark.parametrize('''path''', ['''paws''', '''csv'''] )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
inspect_dataset(snake_case, snake_case )
__magic_name__ :Optional[Any] = path + '''.py'''
assert script_name in os.listdir(snake_case )
assert "__pycache__" not in os.listdir(snake_case )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''', ['''accuracy'''] )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
inspect_metric(snake_case, snake_case )
__magic_name__ :Dict = path + '''.py'''
assert script_name in os.listdir(snake_case )
assert "__pycache__" not in os.listdir(snake_case )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''', [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :int = get_dataset_config_info(snake_case, config_name=snake_case )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''', [
('''paws''', None, ValueError),
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
with pytest.raises(snake_case ):
get_dataset_config_info(snake_case, config_name=snake_case )
@pytest.mark.parametrize(
'''path, expected''', [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = get_dataset_config_names(snake_case )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''', [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :int = get_dataset_infos(snake_case )
assert list(infos.keys() ) == expected_configs
__magic_name__ :List[Any] = expected_configs[0]
assert expected_config in infos
__magic_name__ :int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''', [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = get_dataset_infos(snake_case )
assert expected_config in infos
__magic_name__ :Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''', [
('''paws''', None, ValueError),
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
with pytest.raises(snake_case ):
get_dataset_split_names(snake_case, config_name=snake_case )
| 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = params
__magic_name__ :Any = np.array(__lowerCAmelCase )
__magic_name__ :Optional[Any] = np.array([len(__lowerCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
"""simple docstring"""
return len(self.lengths )
def A ( self ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.params.max_model_input_size
__magic_name__ :int = self.lengths > max_len
logger.info(F'''Splitting {sum(__lowerCAmelCase )} too long sequences.''' )
def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ):
return [l[i : i + n] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
__magic_name__ :Optional[int] = []
__magic_name__ :List[Any] = []
if self.params.mlm:
__magic_name__ , __magic_name__ :Optional[Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
__magic_name__ , __magic_name__ :Tuple = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__magic_name__ :int = []
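                # split the over-long sequence into chunks of at most max_len - 2 tokens,
                # leaving room to re-attach the special start/end tokens on each chunk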
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase )
if sub_s[-1] != sep_id:
__magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
assert len(__lowerCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__lowerCAmelCase )
new_tok_ids.extend(__lowerCAmelCase )
new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] )
__magic_name__ :Tuple = np.array(__lowerCAmelCase )
__magic_name__ :Optional[int] = np.array(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = len(self )
__magic_name__ :int = self.lengths > 1_1
__magic_name__ :List[str] = self.token_ids[indices]
__magic_name__ :Union[str, Any] = self.lengths[indices]
__magic_name__ :List[str] = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def A ( self ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
__magic_name__ :Tuple = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Dict = len(self )
__magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__magic_name__ :int = (unk_occs / self.lengths) < 0.5
__magic_name__ :str = self.token_ids[indices]
__magic_name__ :str = self.lengths[indices]
__magic_name__ :Any = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def A ( self ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [t[0] for t in batch]
__magic_name__ :List[Any] = [t[1] for t in batch]
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
        # Max sequence length in the batch (used for padding)
__magic_name__ :Tuple = max(__lowerCAmelCase )
# Pad token ids
if self.params.mlm:
__magic_name__ :Any = self.params.special_tok_ids['''pad_token''']
else:
__magic_name__ :str = self.params.special_tok_ids['''unk_token''']
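        # right-pad every sequence in the batch with pad_idx up to the batch max length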
__magic_name__ :Any = [list(t.astype(__lowerCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__lowerCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(__lowerCAmelCase )
assert all(len(__lowerCAmelCase ) == max_seq_len_ for t in tk_ )
__magic_name__ :Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
__magic_name__ :Optional[int] = torch.tensor(__lowerCAmelCase ) # (bs)
return tk_t, lg_t
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ["""PoolFormerFeatureExtractor"""]
SCREAMING_SNAKE_CASE__ : int = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = """▁"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = vocab_file
__magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def A ( self ):
"""simple docstring"""
__magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.__dict__.copy()
__magic_name__ :Optional[Any] = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__magic_name__ :Optional[int] = {}
__magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
__magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase )
return token
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
__magic_name__ :Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
__magic_name__ :Optional[Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ :Optional[int] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
__magic_name__ :Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 0 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
__magic_name__ :Tuple = ''''''
__magic_name__ :Optional[Any] = ''''''
    # append each character followed by "|" to new_input_string for range(0, length-1)
for i in input_string[: len(snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending palindromic
    # substring
__magic_name__ , __magic_name__ :List[Any] = 0, 0
# length[i] shows the length of palindromic substring with center i
__magic_name__ :List[str] = [1 for i in range(len(snake_case ) )]
    # for each character in new_input_string find the corresponding palindromic substring
__magic_name__ :Dict = 0
for j in range(len(snake_case ) ):
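        # start k from the half-length of the palindrome mirrored about the current centre
        # (index l + r - j), clamped so we never assume matches beyond the right edge r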
__magic_name__ :Optional[Any] = 1 if j > r else min(length[l + r - j] // 2, r - j + 1 )
while (
j - k >= 0
and j + k < len(snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
__magic_name__ :Optional[Any] = 2 * k - 1
        # does this palindrome end after the previously explored right edge (that is r)?
        # if yes then update l and r to the bounds of this new palindrome
if j + k - 1 > r:
__magic_name__ :Optional[Any] = j - k + 1 # noqa: E741
__magic_name__ :Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
__magic_name__ :Union[str, Any] = length[j]
__magic_name__ :Union[str, Any] = j
# create that string
__magic_name__ :Any = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
__magic_name__ :int = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def A ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__magic_name__ :Union[str, Any] = {}
for i, token in enumerate(__lowerCAmelCase ):
__magic_name__ :Tuple = i
__magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def A ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
__magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
__magic_name__ :Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = ['''的''', '''人''', '''有''']
__magic_name__ :Any = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = True
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = False
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__magic_name__ :Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 0 | 1 |
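
The WordPiece behaviour exercised by the tokenizer tests above (greedy longest-match-first lookup, a `##` continuation prefix, and an `[UNK]` fallback for unmatched words) can be illustrated with a small standalone sketch. The function name and toy vocabulary below are invented for illustration and are not part of the test suite.

```python
def wordpiece_tokenize(text, vocab, unk_token="[UNK]"):
    """Greedy longest-match-first WordPiece sketch (illustrative only)."""
    output = []
    for word in text.split():
        start, pieces = 0, []
        while start < len(word):
            end, piece = len(word), None
            while start < end:
                candidate = word[start:end]
                if start > 0:
                    candidate = "##" + candidate
                if candidate in vocab:
                    piece = candidate
                    break
                end -= 1
            if piece is None:            # no prefix matched: the whole word is unknown
                pieces = [unk_token]
                break
            pieces.append(piece)
            start = end
        output.extend(pieces)
    return output


vocab = {"un", "##want", "##ed", "runn", "##ing", "[UNK]"}
print(wordpiece_tokenize("unwanted running", vocab))   # ['un', '##want', '##ed', 'runn', '##ing']
print(wordpiece_tokenize("unwantedX running", vocab))  # ['[UNK]', 'runn', '##ing']
```
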
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = emb.weight.shape
__magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :str = emb.weight.data
return lin_layer
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__magic_name__ :List[Any] = mam_aaa['''model''']
remove_ignore_keys_(snake_case )
__magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__magic_name__ :List[str] = MaMaaaConfig(
vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
__magic_name__ :int = state_dict['''decoder.embed_tokens.weight''']
__magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case )
model.model.load_state_dict(snake_case, strict=snake_case )
__magic_name__ :List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
    SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 |
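
The `make_linear_from_emb` helper in the conversion script above re-uses the shared token embedding as a bias-free output projection. Below is a minimal sketch of that weight-tying idea with made-up dimensions; it assumes only that `torch` is available.

```python
import torch
from torch import nn

# Toy embedding standing in for the model's shared token embedding.
emb = nn.Embedding(num_embeddings=10, embedding_dim=4)

# Tie a bias-free linear head to it: logits = hidden_states @ emb.weight.T
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data

hidden_states = torch.randn(2, emb_size)
print(lm_head(hidden_states).shape)  # torch.Size([2, 10])
```
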
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = eval_examples
__magic_name__ :str = post_process_function
__magic_name__ :int = quant_trainer_args
__magic_name__ :List[str] = 1_2_8 # default number of calibration samples
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
__magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
return DataLoader(
__lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
__magic_name__ :List[str] = self.model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
model.eval()
quant_trainer.enable_calibration(__lowerCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__lowerCAmelCase ):
# Prediction step
__magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :Any = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
| 0 | 1 |
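
The ONNX export method at the end of the trainer above marks the batch and sequence dimensions as dynamic so the exported graph accepts variable shapes. The following is a hedged sketch of the same `torch.onnx.export` pattern on a toy module; the module, file name, and axis labels are placeholders, not the trainer's actual model.

```python
import torch
from torch import nn

class TinyQAModel(nn.Module):
    def forward(self, input_ids, attention_mask):
        # Stand-in "logits": a masked sum over the sequence dimension.
        return (input_ids * attention_mask).sum(dim=1, keepdim=True).float()

model = TinyQAModel().eval()
dummy_inputs = (torch.ones(1, 8, dtype=torch.long), torch.ones(1, 8, dtype=torch.long))
axes = {0: "batch_size", 1: "seq_len"}

torch.onnx.export(
    model,
    dummy_inputs,
    "tiny_model.onnx",
    opset_version=13,
    input_names=["input_ids", "attention_mask"],
    output_names=["logits"],
    dynamic_axes={"input_ids": axes, "attention_mask": axes, "logits": {0: "batch_size"}},
)
```
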
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ : List[Any] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
SCREAMING_SNAKE_CASE__ : Tuple = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ):
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__magic_name__ :str = np.array([re.sub(__lowerCAmelCase , '''''' , __lowerCAmelCase ) for x in predictions] )
__magic_name__ :Dict = np.array([re.sub(__lowerCAmelCase , '''''' , __lowerCAmelCase ) for x in references] )
else:
__magic_name__ :Tuple = np.asarray(__lowerCAmelCase )
__magic_name__ :Dict = np.asarray(__lowerCAmelCase )
if ignore_case:
__magic_name__ :int = np.char.lower(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = np.char.lower(__lowerCAmelCase )
if ignore_punctuation:
__magic_name__ :List[Any] = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
__magic_name__ :int = np.char.translate(__lowerCAmelCase , table=__lowerCAmelCase )
__magic_name__ :Any = np.char.translate(__lowerCAmelCase , table=__lowerCAmelCase )
if ignore_numbers:
__magic_name__ :Any = string.digits.maketrans('''''' , '''''' , string.digits )
__magic_name__ :Optional[Any] = np.char.translate(__lowerCAmelCase , table=__lowerCAmelCase )
__magic_name__ :Tuple = np.char.translate(__lowerCAmelCase , table=__lowerCAmelCase )
__magic_name__ :Optional[Any] = predictions == references
return {"exact_match": np.mean(__lowerCAmelCase ) * 1_0_0}
| 0 |
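
With the regex and normalization options stripped away, the metric above reduces to an element-wise string comparison averaged over the batch. A minimal NumPy sketch of that core (the helper name is invented):

```python
import numpy as np

def exact_match_rate(predictions, references):
    preds, refs = np.asarray(predictions), np.asarray(references)
    # Fraction of positions where prediction equals reference, scaled to 0-100.
    return float(np.mean(preds == refs) * 100)

refs = ["the cat", "theater", "YELLING", "agent007"]
preds = ["cat?", "theater", "yelling", "agent"]
print(exact_match_rate(preds, refs))  # 25.0 (only "theater" matches exactly)
```
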
def __lowercase ( snake_case ):
"""simple docstring"""
return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] )
def __lowercase ( snake_case ):
"""simple docstring"""
if (len(snake_case ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
    return bytes(int(snake_case[i] + snake_case[i + 1], 1_6 ) for i in range(0, len(snake_case ), 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
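
For reference, the standard library already provides the round trip that the two Base16 helpers above implement by hand; a small sketch using `base64` (the sample bytes are arbitrary):

```python
import base64

data = b"Hello"
encoded = base64.b16encode(data)          # uppercase hex, per RFC 3548 section 6
print(encoded)                            # b'48656C6C6F'
print(base64.b16decode(encoded) == data)  # True
```
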
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = UniSpeechSatForSequenceClassification.from_pretrained(snake_case, config=snake_case )
__magic_name__ :Tuple = downstream_dict['''projector.weight''']
__magic_name__ :str = downstream_dict['''projector.bias''']
__magic_name__ :Union[str, Any] = downstream_dict['''model.post_net.linear.weight''']
__magic_name__ :Optional[int] = downstream_dict['''model.post_net.linear.bias''']
return model
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Dict = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case, config=snake_case )
__magic_name__ :Union[str, Any] = downstream_dict['''model.linear.weight''']
__magic_name__ :List[Any] = downstream_dict['''model.linear.bias''']
return model
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = UniSpeechSatForXVector.from_pretrained(snake_case, config=snake_case )
__magic_name__ :Dict = downstream_dict['''connector.weight''']
__magic_name__ :Optional[Any] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__magic_name__ :Any = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__magic_name__ :Any = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__magic_name__ :Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__magic_name__ :Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__magic_name__ :Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__magic_name__ :Optional[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__magic_name__ :Optional[int] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Dict = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :Optional[Any] = checkpoint['''Downstream''']
__magic_name__ :List[Any] = UniSpeechSatConfig.from_pretrained(snake_case )
__magic_name__ :Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
snake_case, return_attention_mask=snake_case, do_normalize=snake_case )
__magic_name__ :Any = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__magic_name__ :Tuple = convert_classification(snake_case, snake_case, snake_case )
elif arch.endswith('''ForAudioFrameClassification''' ):
__magic_name__ :int = convert_diarization(snake_case, snake_case, snake_case )
elif arch.endswith('''ForXVector''' ):
__magic_name__ :Union[str, Any] = convert_xvector(snake_case, snake_case, snake_case )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__magic_name__ :List[Any] = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(snake_case )
hf_model.save_pretrained(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 0 |
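
Each conversion branch above follows the same pattern: pull a tensor out of the downstream checkpoint's state dict and assign it onto the matching module parameter. Below is a self-contained sketch of that copy on a toy layer, with invented key names and shapes.

```python
import torch
from torch import nn

# Stand-in for the checkpoint's downstream state dict.
downstream_dict = {
    "projector.weight": torch.randn(3, 4),
    "projector.bias": torch.randn(3),
}

projector = nn.Linear(4, 3)
# Copy checkpoint tensors into the module's parameters in place.
projector.weight.data = downstream_dict["projector.weight"]
projector.bias.data = downstream_dict["projector.bias"]

print(torch.equal(projector.weight, downstream_dict["projector.weight"]))  # True
```
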
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(snake_case ):
http_head('''https://huggingface.co''' )
| 0 | 1 |
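
The offline-simulation tests above lean on `pytest.raises` as a context manager to assert that each simulated failure mode surfaces the expected exception type. A trivial illustration with a plain exception (the names are made up):

```python
import pytest

def fetch(url, timeout):
    # Stand-in for a network call that always times out.
    raise TimeoutError(f"timed out after {timeout}s fetching {url}")

def test_fetch_times_out():
    with pytest.raises(TimeoutError):
        fetch("https://huggingface.co", timeout=1.0)
```
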
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = []
for line in lines:
__magic_name__ :Dict = re.sub(R'''#.*''', '''''', snake_case ) # remove comments
if line:
filtered_lines.append(snake_case )
__magic_name__ :int = '''\n'''.join(snake_case )
# Make a hash from all this code
__magic_name__ :Optional[int] = full_str.encode('''utf-8''' )
return shaaaa(snake_case ).hexdigest()
# get importable module names and hash for caching
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
SCREAMING_SNAKE_CASE__ : Dict = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
SCREAMING_SNAKE_CASE__ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 0 |
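
The source-hashing helper above drops comments and blank lines before hashing, so purely cosmetic edits do not change the module hash used for caching. A standalone sketch of the same idea (the function name differs from the original):

```python
import re
from hashlib import sha256

def hash_python_lines(lines):
    filtered = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # strip comments
        if line:                         # drop now-empty lines
            filtered.append(line)
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

a = hash_python_lines(["x = 1", "", "# a comment line", "y = 2"])
b = hash_python_lines(["x = 1", "y = 2"])
print(a == b)  # True: blank lines and comment-only lines do not affect the hash
```
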
import math
from collections.abc import Iterator
from itertools import takewhile
def __lowercase ( snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(snake_case ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :str = 2
while True:
if is_prime(snake_case ):
yield num
num += 1
def __lowercase ( snake_case = 2_0_0_0_0_0_0 ):
"""simple docstring"""
    return sum(takewhile(lambda x : x < snake_case, prime_generator() ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
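
The snippet above sums the primes below two million by trial division; the more usual approach for this Project Euler problem is a sieve of Eratosthenes. A sketch of that alternative:

```python
def sum_primes_below(limit):
    # Sieve of Eratosthenes: sieve[i] stays True while i is still presumed prime.
    sieve = [True] * limit
    sieve[0:2] = [False, False]
    for i in range(2, int(limit**0.5) + 1):
        if sieve[i]:
            sieve[i * i :: i] = [False] * len(sieve[i * i :: i])
    return sum(i for i, is_prime in enumerate(sieve) if is_prime)

print(sum_primes_below(10))         # 17 (= 2 + 3 + 5 + 7)
print(sum_primes_below(2_000_000))  # 142913828922
```
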
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
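
The four helpers above are rearrangements of the ideal gas law PV = nRT with R taken as 0.0821 L·atm/(mol·K). A short worked example with illustrative values:

```python
# One mole of an ideal gas at 273 K in 22.4 L should be at roughly 1 atm.
moles, temperature, volume = 1.0, 273.0, 22.4
pressure = (moles * 0.0821 * temperature) / volume
print(round(pressure, 3))  # 1.001 (atm)
```
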
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
| 0 | 1 |
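
The framework-agnostic utilities exercised above are meant to mirror NumPy semantics for transpose, reshape, squeeze and expand_dims regardless of backend. A NumPy-only sketch of the invariants those tests assert:

```python
import numpy as np

x = np.random.randn(1, 3, 4)
# squeeze removes size-1 axes; expand_dims reinserts one.
assert np.squeeze(x).shape == (3, 4)
assert np.expand_dims(np.squeeze(x), axis=0).shape == (1, 3, 4)

y = np.random.randn(3, 4, 5)
# transpose with explicit axes permutes dimensions; reshape preserves the element count.
assert y.transpose((1, 2, 0)).shape == (4, 5, 3)
assert y.reshape(12, 5).shape == (12, 5)
print("all checks passed")
```
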
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
__magic_name__ :int = load_from_cache_file
__magic_name__ :List[str] = file_format
__magic_name__ :Dict = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__magic_name__ :Union[str, Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0 | 1 |
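
The `_get_dirs` logic above synthesizes directory entries by walking each file's `PurePosixPath.parents`. A filesystem-free sketch of that cache construction, using invented file names:

```python
from pathlib import PurePosixPath

filenames = ["data/train/part-0.parquet", "data/README.md"]
dir_cache = {}
for name in filenames:
    dir_cache[name] = {"name": name, "size": None, "type": "file"}
    # Register every ancestor directory, skipping the trailing "." entry.
    for parent in list(PurePosixPath(name).parents)[:-1]:
        dir_cache[str(parent)] = {"name": str(parent), "size": None, "type": "directory"}

print(sorted(dir_cache))
# ['data', 'data/README.md', 'data/train', 'data/train/part-0.parquet']
```
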
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__magic_name__ :Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowerCAmelCase , cache_dir=__lowerCAmelCase )
__magic_name__ :Dict = [t[-1] for t in os.walk(os.path.join(__lowerCAmelCase , os.listdir(__lowerCAmelCase )[0] , '''snapshots''' ) )]
__magic_name__ :Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowerCAmelCase )
__magic_name__ :Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :int = jax.random.PRNGKey(0 )
__magic_name__ :Any = 4
__magic_name__ :str = jax.device_count()
__magic_name__ :List[str] = num_samples * [prompt]
__magic_name__ :str = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :List[Any] = replicate(__lowerCAmelCase )
__magic_name__ :int = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = shard(__lowerCAmelCase )
__magic_name__ :Any = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
__magic_name__ :List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowerCAmelCase ) == num_samples
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :str = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__lowerCAmelCase )
__magic_name__ :Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :List[str] = jax.random.PRNGKey(0 )
__magic_name__ :str = 5_0
__magic_name__ :Tuple = jax.device_count()
__magic_name__ :Any = num_samples * [prompt]
__magic_name__ :Dict = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :List[Any] = replicate(__lowerCAmelCase )
__magic_name__ :List[str] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Union[str, Any] = shard(__lowerCAmelCase )
__magic_name__ :Optional[int] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase )
__magic_name__ :List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :Tuple = jax.random.PRNGKey(0 )
__magic_name__ :str = 5_0
__magic_name__ :Optional[Any] = jax.device_count()
__magic_name__ :List[Any] = num_samples * [prompt]
__magic_name__ :List[Any] = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :Dict = replicate(__lowerCAmelCase )
__magic_name__ :Tuple = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = shard(__lowerCAmelCase )
__magic_name__ :List[str] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
__magic_name__ :Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :Any = jax.random.PRNGKey(0 )
__magic_name__ :Any = 5_0
__magic_name__ :Optional[Any] = jax.device_count()
__magic_name__ :Optional[Any] = num_samples * [prompt]
__magic_name__ :Optional[Any] = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :Tuple = replicate(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = shard(__lowerCAmelCase )
__magic_name__ :Tuple = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
__magic_name__ , __magic_name__ :Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
__magic_name__ :Any = scheduler.create_state()
__magic_name__ :Optional[int] = scheduler_state
__magic_name__ :int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :Tuple = jax.random.PRNGKey(0 )
__magic_name__ :Any = 5_0
__magic_name__ :Dict = jax.device_count()
__magic_name__ :List[Any] = num_samples * [prompt]
__magic_name__ :Optional[int] = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
__magic_name__ :List[str] = replicate(__lowerCAmelCase )
__magic_name__ :Tuple = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = shard(__lowerCAmelCase )
__magic_name__ :Tuple = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__magic_name__ :str = jax.device_count()
__magic_name__ :str = num_samples * [prompt]
__magic_name__ :Dict = jax.random.split(jax.random.PRNGKey(0 ) , __lowerCAmelCase )
__magic_name__ , __magic_name__ :Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , )
__magic_name__ :Any = replicate(__lowerCAmelCase )
__magic_name__ :Optional[int] = pipeline.prepare_inputs(__lowerCAmelCase )
__magic_name__ :Any = shard(__lowerCAmelCase )
__magic_name__ :int = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__magic_name__ :List[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
__magic_name__ , __magic_name__ :Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , use_memory_efficient_attention=__lowerCAmelCase , )
__magic_name__ :List[Any] = replicate(__lowerCAmelCase )
__magic_name__ :Any = pipeline.prepare_inputs(__lowerCAmelCase )
__magic_name__ :Any = shard(__lowerCAmelCase )
__magic_name__ :Any = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__magic_name__ :List[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 0 |
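
In the Flax pipeline tests above, `shard` splits each host batch so that its leading axis equals the device count before parallel execution. A NumPy illustration of that reshaping; the device count and batch contents are made up:

```python
import numpy as np

num_devices = 4
batch = np.arange(8 * 3).reshape(8, 3)  # 8 prompts, 3 features each

# shard-style split: (batch, ...) -> (devices, per-device batch, ...)
sharded = batch.reshape(num_devices, batch.shape[0] // num_devices, *batch.shape[1:])
print(sharded.shape)  # (4, 2, 3)
```
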
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
| 0 | 1 |
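# A minimal pyarrow round-trip sketch of the write/read behaviour that the Parquet
# reader/writer tests above exercise through the `datasets` wrappers; the file name
# and column values here are made up for illustration.
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]})
pq.write_table(table, "example.parquet")  # four rows, three columns
reloaded = pq.read_table("example.parquet")
assert reloaded.num_rows == 4 and reloaded.num_columns == 3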
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Dict = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :Optional[Any] = [int(snake_case ) for i in num_string]
__magic_name__ :Dict = 1
for i in range(0, len(snake_case ) ):
total *= numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Union[str, Any] = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :str = [int(snake_case ) for i in num_string]
__magic_name__ :Optional[int] = 0
for i in range(0, len(snake_case ) ):
total += numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
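# A self-contained sketch of the multiplicative-persistence idea from the snippet above;
# the function name below is illustrative and not part of the original code.
def _persistence_sketch(n: int) -> int:
    steps = 0
    while n >= 10:
        product = 1
        for digit in str(n):
            product *= int(digit)
        n = product
        steps += 1
    return steps

assert _persistence_sketch(39) == 3  # 39 -> 27 -> 14 -> 4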
def __lowercase ( snake_case ):
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1, len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
__magic_name__ :List[Any] = grid[0]
for row_n in range(1, len(snake_case ) ):
__magic_name__ :Optional[Any] = grid[row_n]
__magic_name__ :List[str] = fill_row(snake_case, snake_case )
__magic_name__ :Any = grid[row_n]
return grid[-1][-1]
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1, len(snake_case ) ):
current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
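# Hedged sketch of the same row-by-row dynamic programming for the minimum path sum,
# run on a small 2x3 grid; names and values are illustrative, not from the source.
def _min_path_sum_sketch(grid):
    rows, cols = len(grid), len(grid[0])
    for c in range(1, cols):
        grid[0][c] += grid[0][c - 1]
    for r in range(1, rows):
        grid[r][0] += grid[r - 1][0]
        for c in range(1, cols):
            grid[r][c] += min(grid[r - 1][c], grid[r][c - 1])
    return grid[-1][-1]

assert _min_path_sum_sketch([[1, 3, 1], [1, 5, 1]]) == 6  # path 1 -> 3 -> 1 -> 1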
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ):
"""simple docstring"""
__magic_name__ :List[Any] = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , )
__magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :str = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__magic_name__ :Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]:
__magic_name__ :str = '''--skip_memory_metrics 0'''
__magic_name__ :Dict = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
__magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
__magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
__magic_name__ :Any = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig
__magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__magic_name__ :Optional[Any] = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
__magic_name__ :Tuple = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__magic_name__ :str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
__magic_name__ :Dict = '''
--do_predict
'''.split()
__magic_name__ :Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__magic_name__ :List[Any] = get_gpu_count()
__magic_name__ :Tuple = get_torch_dist_unique_port()
__magic_name__ :Union[str, Any] = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__magic_name__ :Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
else:
__magic_name__ :List[Any] = ['''run_translation.py'''] + args
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
main()
return output_dir
| 0 | 1 |
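# Back-of-the-envelope check of the Adam-vs-8-bit-optimizer memory argument spelled out
# in the comments of the test above: ~25M quantisable parameters at 8 bytes vs 2 bytes
# of optimizer state. The constants below are the rough figures from those comments.
params_quantised = 25_000_000
adam_bytes, bnb_bytes = 8, 2
saving_mb = params_quantised * (adam_bytes - bnb_bytes) / 2**20
assert 140 < saving_mb < 150  # roughly the ~150MB saving the test leaves a margin around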
SCREAMING_SNAKE_CASE__ : Tuple = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __lowercase ( snake_case ):
"""simple docstring"""
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
__magic_name__ :Dict = ''''''
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
__magic_name__ :int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 |
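# Tiny round-trip sketch of the Baconian-style 5-letter substitution above, using just
# two entries of the table; purely illustrative.
_enc = {"a": "AAAAA", "b": "AAAAB"}
_dec = {v: k for k, v in _enc.items()}
coded = "".join(_enc[c] for c in "ab")
assert coded == "AAAAAAAAAB"
assert "".join(_dec[coded[i : i + 5]] for i in range(0, len(coded), 5)) == "ab"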
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowercase ( snake_case = N ):
"""simple docstring"""
__magic_name__ :Optional[int] = -sys.maxsize - 1
for i in range(len(snake_case ) - 1_2 ):
__magic_name__ :List[Any] = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
__magic_name__ :str = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = 1
for i in range(1, num + 1 ):
fact *= i
return fact
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Any = 0
while number > 0:
__magic_name__ :List[Any] = number % 1_0
sum_of_digits += last_digit
__magic_name__ :Dict = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def __lowercase ( snake_case = 1_0_0 ):
"""simple docstring"""
__magic_name__ :Optional[int] = factorial(snake_case )
__magic_name__ :str = split_and_add(snake_case )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 0 |
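# Worked instance of the factorial digit-sum computed by the snippet above,
# using the standard library factorial for the check.
from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27  # 10! = 3628800 -> 3+6+2+8+8+0+0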
SCREAMING_SNAKE_CASE__ : Tuple = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __lowercase ( snake_case ):
"""simple docstring"""
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
__magic_name__ :Dict = ''''''
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
__magic_name__ :int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
def __lowercase ( snake_case, snake_case=1_0_0_0 ):
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__magic_name__ :List[str] = n - 1
__magic_name__ :List[str] = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
__magic_name__ :int = 0
while count < prec:
__magic_name__ :Any = random.randint(2, n - 1 )
__magic_name__ :str = bin_exp_mod(snake_case, snake_case, snake_case )
if b != 1:
__magic_name__ :Optional[Any] = True
for _ in range(snake_case ):
if b == n - 1:
__magic_name__ :int = False
break
__magic_name__ :Union[str, Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 0 |
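# Sketch of the n - 1 = d * 2**exp decomposition used by the Miller-Rabin style test above,
# plus a single Fermat-style base check; names and the sample number are illustrative.
def _decompose(n: int):
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    return d, exp

assert _decompose(561) == (35, 4)  # 560 = 35 * 2**4
assert pow(2, 560, 561) == 1       # 561 is a Carmichael number, so a single base can be fooled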
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = emb.weight.shape
__magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :str = emb.weight.data
return lin_layer
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__magic_name__ :List[Any] = mam_aaa['''model''']
remove_ignore_keys_(snake_case )
__magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__magic_name__ :List[str] = MaMaaaConfig(
vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
__magic_name__ :int = state_dict['''decoder.embed_tokens.weight''']
__magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case )
model.model.load_state_dict(snake_case, strict=snake_case )
__magic_name__ :List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __lowercase ( snake_case, snake_case=None ):
"""simple docstring"""
__magic_name__ :Optional[Any] = None
if token is not None:
__magic_name__ :List[str] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__magic_name__ :List[Any] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__magic_name__ :List[str] = requests.get(snake_case, headers=snake_case ).json()
__magic_name__ :str = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__magic_name__ :Tuple = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(snake_case ):
__magic_name__ :List[str] = requests.get(url + f'''&page={i + 2}''', headers=snake_case ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __lowercase ( snake_case, snake_case=None ):
"""simple docstring"""
__magic_name__ :Tuple = None
if token is not None:
__magic_name__ :Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__magic_name__ :Tuple = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
__magic_name__ :Optional[int] = requests.get(snake_case, headers=snake_case ).json()
__magic_name__ :Tuple = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
__magic_name__ :Optional[Any] = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(snake_case ):
__magic_name__ :str = requests.get(url + f'''&page={i + 2}''', headers=snake_case ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Dict = None
if token is not None:
__magic_name__ :str = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__magic_name__ :List[Any] = requests.get(snake_case, headers=snake_case, allow_redirects=snake_case )
__magic_name__ :str = result.headers['''Location''']
__magic_name__ :List[Any] = requests.get(snake_case, allow_redirects=snake_case )
__magic_name__ :Tuple = os.path.join(snake_case, f'''{artifact_name}.zip''' )
with open(snake_case, '''wb''' ) as fp:
fp.write(response.content )
def __lowercase ( snake_case, snake_case=None ):
"""simple docstring"""
__magic_name__ :int = []
__magic_name__ :str = []
__magic_name__ :Dict = None
with zipfile.ZipFile(snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case ) as f:
for line in f:
__magic_name__ :List[Any] = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__magic_name__ :int = line[: line.index(''': ''' )]
__magic_name__ :List[str] = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
__magic_name__ :Optional[int] = line[len('''FAILED ''' ) :]
failed_tests.append(snake_case )
elif filename == "job_name.txt":
__magic_name__ :Union[str, Any] = line
if len(snake_case ) != len(snake_case ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case )} for `errors` '''
f'''and {len(snake_case )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
''' problem.''' )
__magic_name__ :Union[str, Any] = None
if job_name and job_links:
__magic_name__ :List[str] = job_links.get(snake_case, snake_case )
# A list with elements of the form (line of error, error, failed test)
__magic_name__ :Optional[Any] = [x + [y] + [job_link] for x, y in zip(snake_case, snake_case )]
return result
def __lowercase ( snake_case, snake_case=None ):
"""simple docstring"""
__magic_name__ :List[str] = []
__magic_name__ :Optional[int] = [os.path.join(snake_case, snake_case ) for p in os.listdir(snake_case ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case, job_links=snake_case ) )
return errors
def __lowercase ( snake_case, snake_case=None ):
"""simple docstring"""
__magic_name__ :int = Counter()
counter.update([x[1] for x in logs] )
__magic_name__ :Union[str, Any] = counter.most_common()
__magic_name__ :List[str] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__magic_name__ :List[Any] = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
__magic_name__ :Optional[Any] = dict(sorted(r.items(), key=lambda snake_case : item[1]["count"], reverse=snake_case ) )
return r
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Any = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
__magic_name__ :int = test.split('''/''' )[2]
else:
__magic_name__ :List[Any] = None
return test
def __lowercase ( snake_case, snake_case=None ):
"""simple docstring"""
__magic_name__ :List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
__magic_name__ :Optional[Any] = [x for x in logs if x[2] is not None]
__magic_name__ :Union[str, Any] = {x[2] for x in logs}
__magic_name__ :Any = {}
for test in tests:
__magic_name__ :Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__magic_name__ :str = counter.most_common()
__magic_name__ :int = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__magic_name__ :List[str] = sum(error_counts.values() )
if n_errors > 0:
__magic_name__ :Optional[int] = {'''count''': n_errors, '''errors''': error_counts}
__magic_name__ :Tuple = dict(sorted(r.items(), key=lambda snake_case : item[1]["count"], reverse=snake_case ) )
return r
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = '''| no. | error | status |'''
__magic_name__ :Any = '''|-:|:-|:-|'''
__magic_name__ :str = [header, sep]
for error in reduced_by_error:
__magic_name__ :Tuple = reduced_by_error[error]['''count''']
__magic_name__ :List[Any] = f'''| {count} | {error[:1_0_0]} | |'''
lines.append(snake_case )
return "\n".join(snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = '''| model | no. of errors | major error | count |'''
__magic_name__ :List[Any] = '''|-:|-:|-:|-:|'''
__magic_name__ :Optional[Any] = [header, sep]
for model in reduced_by_model:
__magic_name__ :str = reduced_by_model[model]['''count''']
__magic_name__ , __magic_name__ :List[Any] = list(reduced_by_model[model]['''errors'''].items() )[0]
__magic_name__ :List[str] = f'''| {model} | {count} | {error[:6_0]} | {_count} |'''
lines.append(snake_case )
return "\n".join(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
SCREAMING_SNAKE_CASE__ : str = get_job_links(args.workflow_run_id, token=args.token)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
SCREAMING_SNAKE_CASE__ : Optional[int] = k.find(""" / """)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = k[index + len(""" / """) :]
SCREAMING_SNAKE_CASE__ : int = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
SCREAMING_SNAKE_CASE__ : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
SCREAMING_SNAKE_CASE__ : int = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
SCREAMING_SNAKE_CASE__ : str = reduce_by_error(errors)
SCREAMING_SNAKE_CASE__ : str = reduce_by_model(errors)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_github_table(reduced_by_error)
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
SCREAMING_SNAKE_CASE__ : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
__magic_name__ :Stack[int] = Stack()
__magic_name__ :Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(snake_case ) )
elif i in operators:
# RULE 2
operator_stack.push(snake_case )
elif i == ")":
# RULE 4
__magic_name__ :int = operator_stack.peek()
operator_stack.pop()
__magic_name__ :Dict = operand_stack.peek()
operand_stack.pop()
__magic_name__ :Optional[int] = operand_stack.peek()
operand_stack.pop()
__magic_name__ :Any = operators[opr](snake_case, snake_case )
operand_stack.push(snake_case )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 0 |
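# Hedged sketch of the two-stack evaluation shown above, using plain lists as stacks;
# it reproduces the 45 result of the example equation, but names are illustrative.
import operator as op

def _two_stack_eval(equation: str) -> int:
    ops = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands, operators = [], []
    for ch in equation:
        if ch.isdigit():
            operands.append(int(ch))
        elif ch in ops:
            operators.append(ch)
        elif ch == ")":
            right, left = operands.pop(), operands.pop()
            operands.append(ops[operators.pop()](left, right))
    return operands[-1]

assert _two_stack_eval("(5 + ((4 * 2) * (2 + 3)))") == 45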
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''ChineseCLIPImageProcessor'''
a__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
__magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' )
__magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.image_processor
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__magic_name__ :Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.tokenizer.model_input_names
__magic_name__ :Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
| 0 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase )
class lowerCamelCase_ ( lowerCamelCase ):
a__ = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
a__ = Features({'''text''': Value('''string''' )} )
a__ = Features({} )
a__ = "text"
@property
def A ( self ):
"""simple docstring"""
return {self.text_column: "text"}
| 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE__ : int = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ),
}
| 0 | 1 |
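# Worked check of the binary Matthews correlation coefficient described in the metric
# docstring above: MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
# The confusion-matrix counts below are made up for illustration.
from math import sqrt

tp, tn, fp, fn = 6, 3, 1, 2
mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
assert round(mcc, 2) == 0.48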
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 0 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case ):
print(f'''{i}\t\t{d}''' )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = [float('''inf''' )] * vertex_count
__magic_name__ :Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__magic_name__ :Tuple = distance[u] + w
__magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 1 |
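# Self-contained sketch of the edge-relaxation loop used by the Bellman-Ford snippet above;
# the three-edge graph and all names here are illustrative.
def _bellman_ford_sketch(edges, vertex_count, src):
    dist = [float("inf")] * vertex_count
    dist[src] = 0.0
    for _ in range(vertex_count - 1):
        for u, v, w in edges:
            if dist[u] != float("inf") and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
    return dist

assert _bellman_ford_sketch([(0, 1, 4), (0, 2, 1), (2, 1, 2)], 3, 0) == [0.0, 3, 1]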
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __lowercase ( snake_case, snake_case=False ):
"""simple docstring"""
__magic_name__ :List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__magic_name__ :List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __lowercase ( snake_case, snake_case, snake_case=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
__magic_name__ :List[Any] = ''''''
else:
__magic_name__ :Any = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ :Any = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
__magic_name__ :Dict = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ :Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
__magic_name__ :Dict = in_proj_bias[: config.hidden_size]
__magic_name__ :Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ :Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ :List[str] = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ :List[str] = in_proj_bias[-config.hidden_size :]
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = dct.pop(snake_case )
__magic_name__ :Tuple = val
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = ViTMSNConfig()
__magic_name__ :Optional[int] = 1_0_0_0
__magic_name__ :Optional[int] = '''datasets/huggingface/label-files'''
__magic_name__ :Optional[Any] = '''imagenet-1k-id2label.json'''
__magic_name__ :List[str] = json.load(open(hf_hub_download(snake_case, snake_case ), '''r''' ) )
__magic_name__ :Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()}
__magic_name__ :List[str] = idalabel
__magic_name__ :List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__magic_name__ :Any = 3_8_4
__magic_name__ :Optional[Any] = 1_5_3_6
__magic_name__ :List[Any] = 6
elif "l16" in checkpoint_url:
__magic_name__ :Optional[Any] = 1_0_2_4
__magic_name__ :List[Any] = 4_0_9_6
__magic_name__ :Optional[int] = 2_4
__magic_name__ :str = 1_6
__magic_name__ :Dict = 0.1
elif "b4" in checkpoint_url:
__magic_name__ :int = 4
elif "l7" in checkpoint_url:
__magic_name__ :Dict = 7
__magic_name__ :List[Any] = 1_0_2_4
__magic_name__ :Optional[Any] = 4_0_9_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Dict = 1_6
__magic_name__ :List[Any] = 0.1
__magic_name__ :List[Any] = ViTMSNModel(snake_case )
__magic_name__ :Union[str, Any] = torch.hub.load_state_dict_from_url(snake_case, map_location='''cpu''' )['''target_encoder''']
__magic_name__ :Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case )
__magic_name__ :str = create_rename_keys(snake_case, base_model=snake_case )
for src, dest in rename_keys:
rename_key(snake_case, snake_case, snake_case )
read_in_q_k_v(snake_case, snake_case, base_model=snake_case )
model.load_state_dict(snake_case )
model.eval()
__magic_name__ :Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__magic_name__ :Optional[Any] = Image.open(requests.get(snake_case, stream=snake_case ).raw )
__magic_name__ :Union[str, Any] = ViTImageProcessor(
size=config.image_size, image_mean=snake_case, image_std=snake_case )
__magic_name__ :List[str] = image_processor(images=snake_case, return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
__magic_name__ :Optional[Any] = model(**snake_case )
__magic_name__ :Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__magic_name__ :Optional[int] = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
__magic_name__ :int = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
__magic_name__ :Dict = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
__magic_name__ :int = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
__magic_name__ :int = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], snake_case, atol=1E-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        prediction_scores = model(inputs )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self ):
"""simple docstring"""
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
# TODO Replace vocab size
        vocab_size = 5_0_0_0_0
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
        input_ids = tf.constant([[4, 1_0]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
        # query/key tensors of shape (batch=2, num_heads=12, seq_len=16, head_dim=64)
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swinv2"""] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt-neox-20b""": 20_48,
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , **__lowerCAmelCase , )
__magic_name__ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
__magic_name__ :List[str] = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
__magic_name__ :List[str] = add_prefix_space
__magic_name__ :List[str] = pre_tok_class(**__lowerCAmelCase )
__magic_name__ :Any = add_prefix_space
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
        input_ids = []
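        # flatten the conversation: each utterance is encoded and followed by the EOS token, and only the most recent model_max_length tokens are kept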
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) + [self.eos_token_id] )
if len(__lowerCAmelCase ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids
| 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
    start_idx = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
def __lowercase ( snake_case ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split('''.''' )
            if key.startswith('''visual''' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
                dim = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
            new_key_name = rename_key(key )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
def __lowercase ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=False ):
"""simple docstring"""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
    missing_keys , unexpected_keys = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
        outputs = model(**snake_case )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 1 |
from __future__ import annotations
class lowerCamelCase_ :
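    # simple XOR cipher: applying the same key twice restores the original text, so encryption and decryption share the same logic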
    def __init__( self , key = 0 ):
        """simple docstring"""
        self.__key = key
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(__lowerCAmelCase ) ^ key ) for ch in content]
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(__lowerCAmelCase ) ^ key ) for ch in content]
def A ( self , __lowerCAmelCase , __lowerCAmelCase = 0 ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
__magic_name__ :List[Any] = ''''''
for ch in content:
ans += chr(ord(__lowerCAmelCase ) ^ key )
return ans
def A ( self , __lowerCAmelCase , __lowerCAmelCase = 0 ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
__magic_name__ :List[str] = ''''''
for ch in content:
ans += chr(ord(__lowerCAmelCase ) ^ key )
return ans
def A ( self , __lowerCAmelCase , __lowerCAmelCase = 0 ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase )
try:
with open(__lowerCAmelCase ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__lowerCAmelCase , __lowerCAmelCase ) )
except OSError:
return False
return True
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase )
try:
with open(__lowerCAmelCase ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(__lowerCAmelCase , __lowerCAmelCase ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowerCamelCase_ ( lowerCamelCase ):
    def __init__( self , params , data ):
        """simple docstring"""
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
"""simple docstring"""
return len(self.lengths )
def A ( self ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def A ( self ):
"""simple docstring"""
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(F'''Splitting {sum(idxs )} too long sequences.''' )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
        else:
            cls_id , sep_id = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__magic_name__ :int = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__magic_name__ :List[Any] = np.insert(__lowerCAmelCase , 0 , __lowerCAmelCase )
if sub_s[-1] != sep_id:
__magic_name__ :Union[str, Any] = np.insert(__lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
assert len(__lowerCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__lowerCAmelCase )
new_tok_ids.extend(__lowerCAmelCase )
new_lengths.extend([len(__lowerCAmelCase ) for l in sub_seqs] )
__magic_name__ :Tuple = np.array(__lowerCAmelCase )
__magic_name__ :Optional[int] = np.array(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = len(self )
__magic_name__ :int = self.lengths > 1_1
__magic_name__ :List[str] = self.token_ids[indices]
__magic_name__ :Union[str, Any] = self.lengths[indices]
__magic_name__ :List[str] = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def A ( self ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
__magic_name__ :Tuple = self.params.special_tok_ids['''unk_token''']
__magic_name__ :Dict = len(self )
__magic_name__ :Tuple = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__magic_name__ :int = (unk_occs / self.lengths) < 0.5
__magic_name__ :str = self.token_ids[indices]
__magic_name__ :str = self.lengths[indices]
__magic_name__ :Any = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def A ( self ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def A ( self , batch ):
        """simple docstring"""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['''pad_token''']
        else:
            pad_idx = self.params.special_tok_ids['''unk_token''']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
return tk_t, lg_t
| 0 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = CTRLTokenizer
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def A ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = """▁"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=[] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = vocab_file
__magic_name__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def A ( self ):
"""simple docstring"""
__magic_name__ :str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.__dict__.copy()
__magic_name__ :Optional[Any] = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__magic_name__ :Optional[int] = {}
__magic_name__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
__magic_name__ :int = self.sp_model.IdToPiece(__lowerCAmelCase )
return token
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
__magic_name__ :Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
__magic_name__ :Optional[Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
        out_vocab_file = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
__magic_name__ :Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 0 | 1 |
import random
def partition(a , left_index , right_index ):
    """simple docstring"""
    # Lomuto-style partition around a[left_index]; returns the pivot's final position
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index ):
        if a[j] < pivot:
            # move elements smaller than the pivot into the left part
            a[j], a[i] = a[i], a[j]
            i += 1
    # place the pivot between the smaller and larger elements
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a , left , right ):
    """simple docstring"""
    if left < right:
        pivot = random.randint(left, right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right )
        quick_sort_random(
            a, left, pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right )  # recursive quicksort to the right of the pivot point
def main( ):
    """simple docstring"""
    user_input = input('''Enter numbers separated by a comma:\n''' ).strip()
    unsorted = [int(item ) for item in user_input.split(''',''' )]
    quick_sort_random(unsorted, 0, len(unsorted ) )
    print(unsorted )
if __name__ == "__main__":
main()
| 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def A ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A ( self ):
"""simple docstring"""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def A ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
                __magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
__magic_name__ :Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = ['''的''', '''人''', '''有''']
__magic_name__ :Any = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = True
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = False
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__magic_name__ :Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 0 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
"""facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""",
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''timesformer'''
def __init__( self , __lowerCAmelCase=2_2_4 , __lowerCAmelCase=1_6 , __lowerCAmelCase=3 , __lowerCAmelCase=8 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=True , __lowerCAmelCase="divided_space_time" , __lowerCAmelCase=0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
__magic_name__ :str = image_size
__magic_name__ :Tuple = patch_size
__magic_name__ :Any = num_channels
__magic_name__ :str = num_frames
__magic_name__ :Optional[Any] = hidden_size
__magic_name__ :Union[str, Any] = num_hidden_layers
__magic_name__ :Optional[Any] = num_attention_heads
__magic_name__ :Optional[int] = intermediate_size
__magic_name__ :List[Any] = hidden_act
__magic_name__ :str = hidden_dropout_prob
__magic_name__ :Optional[Any] = attention_probs_dropout_prob
__magic_name__ :Optional[Any] = initializer_range
__magic_name__ :Dict = layer_norm_eps
__magic_name__ :List[Any] = qkv_bias
__magic_name__ :int = attention_type
__magic_name__ :Optional[Any] = drop_path_rate
| 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = eval_examples
__magic_name__ :str = post_process_function
__magic_name__ :int = quant_trainer_args
__magic_name__ :List[str] = 1_2_8 # default number of calibration samples
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
__magic_name__ :Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ :Optional[int] = self._remove_unused_columns(__lowerCAmelCase , description='''Calibration''' )
return DataLoader(
__lowerCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__lowerCAmelCase , )
def A ( self , __lowerCAmelCase=None ):
"""simple docstring"""
__magic_name__ :Dict = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ :Any = self.get_calib_dataloader(__lowerCAmelCase )
__magic_name__ :List[str] = self.model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args , calib=__lowerCAmelCase )
model.eval()
quant_trainer.enable_calibration(__lowerCAmelCase )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__lowerCAmelCase ):
# Prediction step
__magic_name__ , __magic_name__ , __magic_name__ :str = self.prediction_step(__lowerCAmelCase , __lowerCAmelCase , prediction_loss_only=__lowerCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :Any = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
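# A minimal, self-contained sketch of the torch.onnx.export pattern used above, with
# dynamic axes for a variable batch dimension. The toy module, shapes and file name
# below are assumptions for illustration only; the real export goes through the
# quantization-aware QA model prepared by quant_trainer.
def _toy_onnx_export_sketch():
    from torch import nn

    toy_model = nn.Linear(1_6 , 2 )
    toy_input = torch.randn(1 , 1_6 )
    toy_axes = {0: '''batch_size'''}
    torch.onnx.export(
        toy_model , toy_input , '''toy_model.onnx''' , export_params=True , opset_version=1_3 , do_constant_folding=True , input_names=['''input'''] , output_names=['''output'''] , dynamic_axes={'''input''': toy_axes, '''output''': toy_axes} , )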
| 0 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE__ : Dict = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :List[Any] = calculate_rouge(snake_case, snake_case, bootstrap_aggregation=snake_case, rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(snake_case, snake_case )
__magic_name__ :List[Any] = calculate_rouge(snake_case, snake_case, bootstrap_aggregation=snake_case, rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Dict = '''rougeLsum'''
__magic_name__ :Any = calculate_rouge(snake_case, snake_case, newline_sep=snake_case, rouge_keys=[k] )[k]
__magic_name__ :List[str] = calculate_rouge(snake_case, snake_case, newline_sep=snake_case, rouge_keys=[k] )[k]
assert score > score_no_sep
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :str = ['''rouge1''', '''rouge2''', '''rougeL''']
__magic_name__ :int = calculate_rouge(snake_case, snake_case, newline_sep=snake_case, rouge_keys=snake_case )
__magic_name__ :Optional[int] = calculate_rouge(snake_case, snake_case, newline_sep=snake_case, rouge_keys=snake_case )
assert score_sep == score_no_sep
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Any = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
__magic_name__ :Dict = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(snake_case, snake_case, newline_sep=snake_case ) == calculate_rouge(snake_case, snake_case, newline_sep=snake_case )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Tuple = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
__magic_name__ :int = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
__magic_name__ :Union[str, Any] = calculate_rouge(snake_case, snake_case, rouge_keys=['''rougeLsum'''], newline_sep=snake_case )['''rougeLsum''']
__magic_name__ :List[Any] = calculate_rouge(snake_case, snake_case, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :int = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
__magic_name__ :Tuple = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
assert isinstance(snake_case, snake_case )
__magic_name__ :Union[str, Any] = calculate_rouge_path(
data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=snake_case )
assert isinstance(snake_case, snake_case )
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(snake_case )] )
def __lowercase ( snake_case ):
"""simple docstring"""
if (len(snake_case ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1], 1_6 ) for i in range(0, len(snake_case ), 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
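# Round-trip sketch for the Base16 helpers above. Both helpers are defined under the
# same name `__lowercase`, so the second definition shadows the first; the standard
# library offers the equivalent pair base64.b16encode / base64.b16decode, which also
# expects the uppercase RFC 3548 alphabet. The sample payload is an assumption.
#
# >>> import base64
# >>> base64.b16encode(b"Hello World!")
# b'48656C6C6F20576F726C6421'
# >>> base64.b16decode("48656C6C6F20576F726C6421")
# b'Hello World!'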
| 0 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
SCREAMING_SNAKE_CASE__ : List[Any] = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.config_name == "default":
__magic_name__ :str = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
else:
__magic_name__ :Dict = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False ):
"""simple docstring"""
if gpus is None:
__magic_name__ :Optional[Any] = 1 if torch.cuda.is_available() else 0
__magic_name__ :List[Any] = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        __magic_name__ :List[str] = [dict(zip(data , t ) ) for t in zip(*data.values() )]
__magic_name__ , __magic_name__ :Optional[Any] = self.scorer.predict(__lowerCAmelCase , gpus=__lowerCAmelCase , progress_bar=__lowerCAmelCase )
return {"mean_score": mean_score, "scores": scores}
| 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(snake_case ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def __lowercase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(snake_case ):
http_head('''https://huggingface.co''' )
| 0 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
    bitmap = 0
    for ch in snake_case:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2, ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
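# Quick illustration of the bitmap trick above: each character toggles the bit at its
# Unicode code point, so a repeated character is caught when its bit is already set.
# For "abc" the bits 2**97, 2**98 and 2**99 are set once each; for "aba" the bit 2**97
# is already on when the second "a" arrives. Expected results (sample inputs assumed):
#
# >>> __lowercase("abc")
# True
# >>> __lowercase("aba")
# False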
| 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def __lowercase ( snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(snake_case ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :str = 2
while True:
if is_prime(snake_case ):
yield num
num += 1
def __lowercase ( snake_case = 2_0_0_0_0_0_0 ):
"""simple docstring"""
    return sum(takewhile(lambda x : x < snake_case, prime_generator() ) )
if __name__ == "__main__":
print(f"{solution() = }")
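# Standalone sketch of the 6k +/- 1 trial-division test used above, with descriptive
# names (the name `is_prime_6k` is an assumption; in the file all three helpers share
# the name `__lowercase` and therefore shadow one another).
def is_prime_6k(number: int) -> bool:
    """Probe 2, 3 and divisors of the form 6k +/- 1 up to sqrt(number)."""
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Spot checks: 97 and 7919 are prime; 1, 49 and 100 are not.
assert is_prime_6k(97) and is_prime_6k(7919)
assert not (is_prime_6k(1) or is_prime_6k(49) or is_prime_6k(100))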
| 0 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
__magic_name__ :Union[str, Any] = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
def __lowercase ( snake_case ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        __magic_name__ :Any = orig_state_dict.pop(key )
if "attn.in_proj" in key:
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
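# Minimal sketch of the fused-QKV split performed above: CLIP-style checkpoints store
# the attention projections as a single in_proj matrix of shape (3 * dim, dim), which
# is sliced into query / key / value chunks (the helper name and example shape are
# assumptions for illustration).
def _split_in_proj_sketch(in_proj_weight, dim):
    q_proj = in_proj_weight[:dim, :]
    k_proj = in_proj_weight[dim : dim * 2, :]
    v_proj = in_proj_weight[-dim:, :]
    return q_proj, k_proj, v_proj


# e.g. a (3 * 768, 768) fused weight yields three (768, 768) projection matrices.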
def __lowercase ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=False ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
__magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ :Tuple = model(**snake_case )
# Verify outputs
__magic_name__ :Any = outputs.logits_per_video
__magic_name__ :str = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
| 0 | 1 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = TapasConfig.from_json_file(snake_case )
# set absolute/relative position embeddings parameter
__magic_name__ :Optional[int] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__magic_name__ :Any = TapasForQuestionAnswering(config=snake_case )
elif task == "WTQ":
# run_task_main.py hparams
__magic_name__ :Any = 4
__magic_name__ :int = True
# hparam_utils.py hparams
__magic_name__ :int = 0.66_4694
__magic_name__ :str = 0.20_7951
__magic_name__ :List[Any] = 0.12_1194
__magic_name__ :List[Any] = True
__magic_name__ :int = True
__magic_name__ :Tuple = False
__magic_name__ :Tuple = 0.035_2513
__magic_name__ :str = TapasForQuestionAnswering(config=snake_case )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__magic_name__ :Optional[Any] = 4
__magic_name__ :Union[str, Any] = False
# hparam_utils.py hparams
__magic_name__ :Optional[int] = 36.4519
__magic_name__ :Dict = 0.90_3421
__magic_name__ :Any = 222.088
__magic_name__ :str = True
__magic_name__ :int = True
__magic_name__ :int = True
__magic_name__ :List[str] = 0.76_3141
__magic_name__ :Optional[Any] = TapasForQuestionAnswering(config=snake_case )
elif task == "TABFACT":
__magic_name__ :str = TapasForSequenceClassification(config=snake_case )
elif task == "MLM":
__magic_name__ :Any = TapasForMaskedLM(config=snake_case )
elif task == "INTERMEDIATE_PRETRAINING":
__magic_name__ :str = TapasModel(config=snake_case )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(snake_case, snake_case, snake_case )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(snake_case )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
__magic_name__ :List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + '''vocab.txt''', model_max_length=5_1_2 )
tokenizer.save_pretrained(snake_case )
print('''Used relative position embeddings:''', model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not head:
return True
# split the list to two parts
__magic_name__ , __magic_name__ :List[str] = head.next, head
while fast and fast.next:
__magic_name__ :str = fast.next.next
__magic_name__ :int = slow.next
__magic_name__ :Optional[int] = slow.next
    __magic_name__ :Optional[Any] = None # cut the first half off here (the comparison below still works even if you skip this)
# reverse the second part
__magic_name__ :Optional[int] = None
while second:
__magic_name__ :List[str] = second.next
__magic_name__ :Dict = node
__magic_name__ :Dict = second
__magic_name__ :Union[str, Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
__magic_name__ :int = node.next
__magic_name__ :Dict = head.next
return True
def __lowercase ( snake_case ):
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
__magic_name__ :Dict = head
while fast and fast.next:
__magic_name__ , __magic_name__ :Union[str, Any] = fast.next.next, slow.next
# 2. Push the second half into the stack
__magic_name__ :Union[str, Any] = [slow.val]
while slow.next:
__magic_name__ :List[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
__magic_name__ :Optional[int] = cur.next
return True
def __lowercase ( snake_case ):
"""simple docstring"""
if not head or not head.next:
return True
__magic_name__ :List[str] = {}
__magic_name__ :Optional[Any] = 0
while head:
if head.val in d:
d[head.val].append(snake_case )
else:
__magic_name__ :Optional[Any] = [pos]
__magic_name__ :Optional[Any] = head.next
pos += 1
__magic_name__ :Optional[int] = pos - 1
__magic_name__ :Union[str, Any] = 0
for v in d.values():
if len(snake_case ) % 2 != 0:
middle += 1
else:
__magic_name__ :int = 0
for i in range(0, len(snake_case ) ):
if v[i] + v[len(snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
| 0 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''vivit'''
def __init__( self , __lowerCAmelCase=2_2_4 , __lowerCAmelCase=3_2 , __lowerCAmelCase=[2, 1_6, 1_6] , __lowerCAmelCase=3 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu_fast" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-06 , __lowerCAmelCase=True , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Dict = hidden_size
__magic_name__ :Tuple = num_hidden_layers
__magic_name__ :Dict = num_attention_heads
__magic_name__ :int = intermediate_size
__magic_name__ :Optional[int] = hidden_act
__magic_name__ :str = hidden_dropout_prob
__magic_name__ :int = attention_probs_dropout_prob
__magic_name__ :Dict = initializer_range
__magic_name__ :Optional[int] = layer_norm_eps
__magic_name__ :Optional[int] = image_size
__magic_name__ :Optional[int] = num_frames
__magic_name__ :List[Any] = tubelet_size
__magic_name__ :List[str] = num_channels
__magic_name__ :Optional[int] = qkv_bias
super().__init__(**__lowerCAmelCase )
| 0 |
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Dict = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :Optional[Any] = [int(snake_case ) for i in num_string]
__magic_name__ :Dict = 1
for i in range(0, len(snake_case ) ):
total *= numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
def __lowercase ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case, snake_case ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
__magic_name__ :str = 0
__magic_name__ :Union[str, Any] = str(snake_case )
while len(snake_case ) != 1:
__magic_name__ :str = [int(snake_case ) for i in num_string]
__magic_name__ :Optional[int] = 0
for i in range(0, len(snake_case ) ):
total += numbers[i]
__magic_name__ :int = str(snake_case )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
from itertools import permutations
def __lowercase ( snake_case ):
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__magic_name__ :Union[str, Any] = [7, 1_1, 1_3, 1_7]
for i, test in enumerate(snake_case ):
if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
return False
return True
def __lowercase ( snake_case = 1_0 ):
"""simple docstring"""
return sum(
int(''''''.join(map(snake_case, snake_case ) ) )
for num in permutations(range(snake_case ) )
if is_substring_divisible(snake_case ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ):
"""simple docstring"""
__magic_name__ :List[Any] = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , )
__magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :str = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__magic_name__ :Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
        # via a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]:
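            # Run one short training pass with the given optimizer and return the GPU peak / allocated memory deltas (MB) and the train loss.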
__magic_name__ :str = '''--skip_memory_metrics 0'''
__magic_name__ :Dict = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
__magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
__magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
__magic_name__ :Any = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig
__magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
__magic_name__ :Optional[Any] = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
__magic_name__ :Tuple = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__magic_name__ :str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
__magic_name__ :Dict = '''
--do_predict
'''.split()
__magic_name__ :Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__magic_name__ :List[Any] = get_gpu_count()
__magic_name__ :Tuple = get_torch_dist_unique_port()
__magic_name__ :Union[str, Any] = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__magic_name__ :Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
else:
__magic_name__ :List[Any] = ['''run_translation.py'''] + args
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
main()
return output_dir
| 0 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __lowercase ( snake_case ):
"""simple docstring"""
if "model" in orig_key:
__magic_name__ :List[Any] = orig_key.replace('''model.''', '''''' )
if "norm1" in orig_key:
__magic_name__ :Union[str, Any] = orig_key.replace('''norm1''', '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
__magic_name__ :Optional[Any] = orig_key.replace('''norm2''', '''output.LayerNorm''' )
if "norm" in orig_key:
__magic_name__ :List[str] = orig_key.replace('''norm''', '''LayerNorm''' )
if "transformer" in orig_key:
__magic_name__ :Dict = orig_key.split('''.''' )[0].split('''_''' )[-1]
__magic_name__ :Tuple = orig_key.replace(f'''transformer_{layer_num}''', f'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
__magic_name__ :Optional[Any] = orig_key.replace('''mha.attn''', '''attention.self''' )
if "mha" in orig_key:
__magic_name__ :Optional[Any] = orig_key.replace('''mha''', '''attention''' )
if "W_q" in orig_key:
__magic_name__ :str = orig_key.replace('''W_q''', '''self.query''' )
if "W_k" in orig_key:
__magic_name__ :Optional[int] = orig_key.replace('''W_k''', '''self.key''' )
if "W_v" in orig_key:
__magic_name__ :Dict = orig_key.replace('''W_v''', '''self.value''' )
if "ff1" in orig_key:
__magic_name__ :int = orig_key.replace('''ff1''', '''intermediate.dense''' )
if "ff2" in orig_key:
__magic_name__ :Any = orig_key.replace('''ff2''', '''output.dense''' )
if "ff" in orig_key:
__magic_name__ :Optional[Any] = orig_key.replace('''ff''', '''output.dense''' )
if "mlm_class" in orig_key:
__magic_name__ :List[str] = orig_key.replace('''mlm.mlm_class''', '''cls.predictions.decoder''' )
if "mlm" in orig_key:
__magic_name__ :str = orig_key.replace('''mlm''', '''cls.predictions.transform''' )
if "cls" not in orig_key:
__magic_name__ :Optional[Any] = '''yoso.''' + orig_key
return orig_key
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
__magic_name__ :int = val
__magic_name__ :Any = orig_state_dict['''cls.predictions.decoder.bias''']
__magic_name__ :Union[str, Any] = torch.arange(snake_case ).expand((1, -1) ) + 2
return orig_state_dict
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model_state_dict''']
__magic_name__ :Optional[int] = YosoConfig.from_json_file(snake_case )
__magic_name__ :str = YosoForMaskedLM(snake_case )
__magic_name__ :str = convert_checkpoint_helper(config.max_position_embeddings, snake_case )
print(model.load_state_dict(snake_case ) )
model.eval()
model.save_pretrained(snake_case )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 0 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowercase ( snake_case = N ):
"""simple docstring"""
__magic_name__ :Optional[int] = -sys.maxsize - 1
for i in range(len(snake_case ) - 1_2 ):
__magic_name__ :List[Any] = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
__magic_name__ :str = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE__ : List[Any] = logging.getLogger(__name__)
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase=-1 ):
"""simple docstring"""
# in NER datasets, the last column is usually reserved for NER label
__magic_name__ :int = label_idx
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Dict = mode.value
__magic_name__ :List[Any] = os.path.join(__lowerCAmelCase , F'''{mode}.txt''' )
__magic_name__ :List[str] = 1
__magic_name__ :Tuple = []
with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
__magic_name__ :Tuple = []
__magic_name__ :int = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
guid_index += 1
__magic_name__ :str = []
__magic_name__ :Tuple = []
else:
__magic_name__ :Optional[Any] = line.split(''' ''' )
words.append(splits[0] )
if len(__lowerCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
return examples
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(__lowerCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__magic_name__ :Dict = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(__lowerCAmelCase )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if path:
with open(__lowerCAmelCase , '''r''' ) as f:
__magic_name__ :str = f.read().splitlines()
if "O" not in labels:
__magic_name__ :Optional[int] = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self ):
"""simple docstring"""
        # in the CONLL2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if path:
with open(__lowerCAmelCase , '''r''' ) as f:
__magic_name__ :Optional[Any] = f.read().splitlines()
if "O" not in labels:
__magic_name__ :Optional[Any] = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Optional[Any] = mode.value
__magic_name__ :Optional[Any] = os.path.join(__lowerCAmelCase , F'''{mode}.txt''' )
__magic_name__ :str = 1
__magic_name__ :Tuple = []
with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
for sentence in parse_incr(__lowerCAmelCase ):
__magic_name__ :Tuple = []
__magic_name__ :Any = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
guid_index += 1
return examples
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = 0
for sentence in parse_incr(__lowerCAmelCase ):
__magic_name__ :Tuple = preds_list[example_id]
__magic_name__ :int = ''''''
for token in sentence:
out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(__lowerCAmelCase )
example_id += 1
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if path:
with open(__lowerCAmelCase , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 0 |
SCREAMING_SNAKE_CASE__ : Tuple = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __lowercase ( snake_case ):
"""simple docstring"""
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
__magic_name__ :Dict = ''''''
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
__magic_name__ :int = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 1 |