import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Convert a TAPAS TensorFlow checkpoint to a PyTorch model for the given task."""
    # Initialise the configuration from the json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to False.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
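
# Example invocation (illustrative paths; the checkpoint file name must end in "model.ckpt"
# so that the vocab file can be located next to it via tf_checkpoint_path[:-10] + "vocab.txt"):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/tapas_wtq/model.ckpt \
#       --tapas_config_file /path/to/tapas_wtq/config.json \
#       --pytorch_dump_path /path/to/output
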
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading directly from a config URL (legacy behavior)
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")

import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
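
    # Worked example with the defaults above: image_size=30 and patch_size=2 give
    # num_patches = (30 // 2) ** 2 = 225, so with mask_ratio=0.6 the unmasked sequence
    # length is ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens (including [CLS]).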
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPreTraining applies random masking, so we need to fix
    # the noise used to generate masks during the test
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))

import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Create train and eval `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
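
# Example launch (illustrative values; assumes the script is saved as test_performance.py
# and an `accelerate` config, e.g. one with a DeepSpeed plugin, has been set up):
#
#   accelerate launch test_performance.py \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./results \
#       --performance_lower_bound 0.8
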
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        # ids start with 6, the id of the bos token that is prepended below
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)

from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2SeqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
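
# Note on the -100 labels above: positions set to -100 are ignored by PyTorch's
# cross-entropy loss, so padding tokens do not contribute to the training loss.
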
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate a Spanish national ID (DNI): 8 digits followed by a checksum letter.

    >>> is_spain_national_id("12345678Z")
    True
    """
    if not isinstance(spanish_id, str):
        error_message = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(error_message)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
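
# Worked example of the checksum: the expected letter is LOOKUP_LETTERS[number % 23].
# For "12345678Z": 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so the ID is valid.
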
def solution(n: int = 100) -> int:
    """
    Return the difference between the square of the sum of the first ``n``
    natural numbers and the sum of their squares.

    >>> solution(10)
    2640
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
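
# Sanity check against the closed forms sum = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6:
# for n = 10, 55**2 - 385 == 3025 - 385 == 2640, matching solution(10).
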
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000 x 1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative value via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using binary search; the bound shrinks monotonically row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Like the brute force above, but break out of each row at the first negative."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
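
# Worked example of the binary search: for the row [4, 3, 2, -1], find_negative_index
# returns 3 (the index of the first negative value), so that row contributes
# len(row) - 3 == 1 negative number to the overall count.
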
def euclidean_distance_sqr(point1, point2) -> float:
    """Squared euclidean distance between two 2D points."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force approach to find the distance between the closest pair of points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair of points in the strip around the dividing line."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide and conquer approach; returns the squared distance of the closest pair."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
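
# Quick check of the distance helper: euclidean_distance_sqr((0, 0), (3, 4)) == 25.
# Only closest_pair_of_points takes the square root at the very end, so intermediate
# comparisons stay in cheap squared-distance space (the reported distance would be 5.0).
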
import math


def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using floating-point square root.

    >>> perfect_square(9)
    True
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search.
    Time complexity: O(log(n)); space complexity: O(1).

    >>> perfect_square_binary_search(16)
    True
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
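
# Example: both implementations classify 49 (7 * 7) as a perfect square, and
# perfect_square_binary_search(50) is False. Note that perfect_square relies on
# floating-point math.sqrt, so the binary-search variant (integer arithmetic only)
# is the safer choice for very large inputs.
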
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
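
# Minimal usage sketch (standard transformers API): instantiate a default
# bert-base-uncased style configuration and build a randomly initialized model from it.
#
#   from transformers import BertConfig, BertModel
#
#   configuration = BertConfig()
#   model = BertModel(configuration)
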
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special, else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos; no bos token is added to the front."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
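
# Behavior sketch: Pegasus appends only an EOS token (no BOS), so for a tokenizer
# instance `tok`, tok.build_inputs_with_special_tokens([5, 6]) returns
# [5, 6, tok.eos_token_id], and get_special_tokens_mask marks the appended EOS
# position with 1 (non-special ids get 0).
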
def excel_title_to_column(column_title: str) -> int:
    """
    Given an uppercase Excel column title, return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
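
# Worked example: "AZ" -> ('Z' - 64) * 26**0 + ('A' - 64) * 26**1 = 26 + 26 = 52;
# column titles behave like a bijective base-26 numeral system with digits A..Z = 1..26.
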
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Calculate the Greatest Common Divisor (GCD) recursively.

    >>> greatest_common_divisor(24, 40)
    8
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """
    Calculate the GCD iteratively using Euclid's algorithm.

    >>> gcd_by_iterative(24, 40)
    8
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Call the GCD functions after getting two integers from the user."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
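
# Trace of gcd_by_iterative(24, 40): the (x, y) pairs step through
# (24, 40) -> (40, 24) -> (24, 16) -> (16, 8) -> (8, 0), so the result is 8.
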
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) )
__UpperCamelCase = qkv_bias
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = 3_64 if 'coco' in model_name else 2_24
__UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict()
elif "opt-6.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict()
elif "t5-xl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase )
return config, image_size
@torch.no_grad()
def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0]
__UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase )
__UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval()
__UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase )
assert len(_lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCamelCase = load_demo_image()
__UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
__UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase )
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
__UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase )
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowercase , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "opt" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__UpperCamelCase = hf_model(_lowercase , _lowercase ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase )
else:
# cast to same type
__UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__UpperCamelCase = ''
__UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase )
__UpperCamelCase = original_model.generate({'image': original_pixel_values} )
__UpperCamelCase = hf_model.generate(
_lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowercase )
__UpperCamelCase = input_ids.shape[1]
__UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 checkpoint to convert (one of the choices above).''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """data2vec-text"""
def __init__( self: Any,A_: Union[str, Any]=3_0522,A_: Optional[Any]=768,A_: Optional[int]=12,A_: int=12,A_: int=3072,A_: Tuple="gelu",A_: List[str]=0.1,A_: str=0.1,A_: str=512,A_: Union[str, Any]=2,A_: Union[str, Any]=0.0_2,A_: Optional[int]=1E-12,A_: Dict=1,A_: str=0,A_: Any=2,A_: Optional[int]="absolute",A_: Tuple=True,A_: int=None,**A_: List[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__UpperCamelCase = import_module('tasks' )
try:
__UpperCamelCase = getattr(_lowercase , model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase = dict(enumerate(_lowercase ) )
__UpperCamelCase = len(_lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_lowercase , _lowercase ) -> Tuple[List[int], List[int]]:
__UpperCamelCase = np.argmax(_lowercase , axis=2 )
__UpperCamelCase, __UpperCamelCase = preds.shape
__UpperCamelCase = [[] for _ in range(_lowercase )]
__UpperCamelCase = [[] for _ in range(_lowercase )]
for i in range(_lowercase ):
for j in range(_lowercase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase, __UpperCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowercase , _lowercase ),
"precision": precision_score(_lowercase , _lowercase ),
"recall": recall_score(_lowercase , _lowercase ),
"f1": fa_score(_lowercase , _lowercase ),
}
# Data collator
    __UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
# Predict
if training_args.do_predict:
__UpperCamelCase = TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = trainer.predict(_lowercase )
__UpperCamelCase, __UpperCamelCase = align_predictions(_lowercase , _lowercase )
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase )
return results
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 1 | 1 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
__snake_case = {value: key for key, value in encode_dict.items()}
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _A ( _lowercase ) -> str:
"""simple docstring"""
if set(_lowercase ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__UpperCamelCase = ''
for word in coded.split():
while len(_lowercase ) != 0:
decoded += decode_dict[word[:5]]
__UpperCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
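    # A minimal usage sketch, assuming the two functions above keep their
    # original names encode()/decode(): encode() maps each letter to its
    # five-character A/B code and decode() inverts it.
    print(encode('hello world'))  # -> 'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'
    print(decode('AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'))  # -> 'hello world'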
| 1 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per node (the launcher spawns one process per GPU)!
# #SBATCH --cpus-per-task=10 # number of cores per task
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _A ( *_lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'r' ) as fh:
fcntl.flock(_lowercase , fcntl.LOCK_EX )
try:
print(*_lowercase )
finally:
fcntl.flock(_lowercase , fcntl.LOCK_UN )
__snake_case = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__snake_case = torch.device('''cuda''', local_rank)
__snake_case = socket.gethostname()
__snake_case = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case = dist.get_rank()
__snake_case = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 | 1 |
from math import pi
def _A ( _lowercase , _lowercase ) -> float:
"""simple docstring"""
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
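    # Quick sanity sketch: a 90-degree arc of a radius-10 circle is a quarter
    # of the circumference, i.e. 2 * pi * 10 / 4 = 5 * pi ~= 15.708.
    print(abs(arc_length(90, 10) - 5 * pi) < 1e-9)  # True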
| 1 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _A ( _lowercase ) -> str:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = tmp_path_factory.getbasetemp() / 'cache'
__UpperCamelCase = test_hf_cache_home / 'datasets'
__UpperCamelCase = test_hf_cache_home / 'metrics'
__UpperCamelCase = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_lowercase ) )
@pytest.fixture(autouse=_lowercase , scope='session' )
def _A ( ) -> Dict:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _lowercase )
@pytest.fixture
def _A ( _lowercase ) -> Any:
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _lowercase )
| 1 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__snake_case = open # noqa: we just need to have a builtin inside this module to test it properly
| 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
        __UpperCamelCase = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = VideoToVideoSDPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = 'np'
__UpperCamelCase = sd_pipe(**A_ ).frames
__UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
        __UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.float16 )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ )
__UpperCamelCase = video.to('cuda' )
__UpperCamelCase = 'Spiderman is surfing'
__UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames
__UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__snake_case = parser.parse_args()
__snake_case = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
__snake_case = CLIPImageProcessor()
__snake_case = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
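# The image-variation pipeline below reuses the text-to-image pipeline's
# components and adds a CLIP image encoder plus its feature extractor on top.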
__snake_case = UnCLIPImageVariationPipeline(
    decoder=txt2img.decoder,
    text_encoder=txt2img.text_encoder,
    tokenizer=txt2img.tokenizer,
    text_proj=txt2img.text_proj,
    feature_extractor=feature_extractor,
    image_encoder=image_encoder,
    super_res_first=txt2img.super_res_first,
    super_res_last=txt2img.super_res_last,
    decoder_scheduler=txt2img.decoder_scheduler,
    super_res_scheduler=txt2img.super_res_scheduler,
)
img2img.save_pretrained(args.dump_path)
| 1 | 1 |
def _A ( _lowercase , _lowercase , _lowercase ) -> int:
"""simple docstring"""
def count_of_possible_combinations(_lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowercase )
def _A ( _lowercase , _lowercase , _lowercase ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
_lowercase , _lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__UpperCamelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowercase )
for item in array )
__UpperCamelCase = answer
return answer
__UpperCamelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowercase , _lowercase )
def _A ( _lowercase , _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = [0] * (target + 1)
__UpperCamelCase = 1
for i in range(1 , target + 1 ):
for j in range(_lowercase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = 3
__snake_case = 5
__snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
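    # Expected output: 9, since f(5) = f(4) + f(3) + f(0) = 5 + 3 + 1 for the
    # ordered combinations of [1, 2, 5] that sum to 5; all three variants
    # above compute the same count.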
| 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = DiTPipeline
_lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_lowercase = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
        __UpperCamelCase = Transformer2DModel(
sample_size=16,num_layers=2,patch_size=4,attention_head_dim=8,num_attention_heads=2,in_channels=4,out_channels=8,attention_bias=A_,activation_fn='gelu-approximate',num_embeds_ada_norm=1000,norm_type='ada_norm_zero',norm_elementwise_affine=A_,)
__UpperCamelCase = AutoencoderKL()
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def snake_case_ ( self: Optional[Any],A_: Any,A_: int=0 ):
'''simple docstring'''
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = pipe(**A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 16, 16, 3) )
__UpperCamelCase = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
__UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A_,1E-3 )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=A_,expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__UpperCamelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
__UpperCamelCase = pipe.get_label_ids(A_ )
__UpperCamelCase = pipe(A_,generator=A_,num_inference_steps=40,output_type='np' ).images
for word, image in zip(A_,A_ ):
__UpperCamelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__UpperCamelCase = ['vase', 'umbrella']
__UpperCamelCase = pipe.get_label_ids(A_ )
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = pipe(A_,generator=A_,num_inference_steps=25,output_type='np' ).images
for word, image in zip(A_,A_ ):
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
__snake_case = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
__snake_case = '''
{0} = None
'''
__snake_case = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
__snake_case = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = _re_backend.findall(_lowercase )
if len(_lowercase ) == 0:
return None
return "_and_".join(_lowercase )
def _A ( ) -> Tuple:
"""simple docstring"""
with open(os.path.join(_lowercase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.readlines()
# Get to the point we do the actual imports for type checking
__UpperCamelCase = 0
__UpperCamelCase = {}
# Go through the end of the file
while line_index < len(_lowercase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__UpperCamelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
__UpperCamelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(_lowercase ) and len(lines[line_index] ) > 1:
__UpperCamelCase = lines[line_index]
__UpperCamelCase = _re_single_line_import.search(_lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_lowercase ) > 0:
__UpperCamelCase = objects
else:
line_index += 1
return backend_specific_objects
def _A ( _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(_lowercase )
elif name.islower():
return DUMMY_FUNCTION.format(_lowercase , _lowercase )
else:
return DUMMY_CLASS.format(_lowercase , _lowercase )
def _A ( _lowercase=None ) -> Optional[Any]:
"""simple docstring"""
if backend_specific_objects is None:
__UpperCamelCase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__UpperCamelCase = {}
for backend, objects in backend_specific_objects.items():
__UpperCamelCase = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
__UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_lowercase , _lowercase ) for o in objects] )
__UpperCamelCase = dummy_file
return dummy_files
def _A ( _lowercase=False ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__UpperCamelCase = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
__UpperCamelCase = os.path.join(_lowercase , 'utils' )
__UpperCamelCase = {
backend: os.path.join(_lowercase , f'''dummy_{short_names.get(_lowercase , _lowercase )}_objects.py''' )
for backend in dummy_files.keys()
}
__UpperCamelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_lowercase ):
with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.read()
else:
__UpperCamelCase = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py as the main '''
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f'''diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py. Run `make fix-copies` '''
'to fix this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__snake_case = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
if isinstance(_lowercase , torch.Tensor ):
return image
elif isinstance(_lowercase , PIL.Image.Image ):
__UpperCamelCase = [image]
__UpperCamelCase = [trans(img.convert('RGB' ) ) for img in image]
__UpperCamelCase = torch.stack(_lowercase )
return image
class __lowerCamelCase (_a ):
def __init__( self: int,A_: Optional[int],A_: Optional[int] ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__UpperCamelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=A_,scheduler=A_ )
def snake_case_ ( self: Optional[int],A_: Optional[Any] ):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def snake_case_ ( self: int,A_: Optional[Any],A_: Any,A_: Optional[Any] ):
'''simple docstring'''
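        # `strength` selects how much of the schedule to keep: e.g. with 50
        # inference steps and strength=0.8, the first 10 timesteps are skipped
        # and the last 40 are used for denoising.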
__UpperCamelCase = min(int(num_inference_steps * strength ),A_ )
__UpperCamelCase = max(num_inference_steps - init_timestep,0 )
__UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case_ ( self: Optional[Any],A_: List[str],A_: Optional[Any],A_: Optional[Any],A_: Optional[int],A_: Optional[int],A_: Tuple=None ):
'''simple docstring'''
if not isinstance(A_,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(A_ )}''' )
__UpperCamelCase = image.to(device=A_,dtype=A_ )
if isinstance(A_,A_ ) and len(A_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(A_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__UpperCamelCase = init_latents.shape
__UpperCamelCase = randn_tensor(A_,generator=A_,device=A_,dtype=A_ )
# get latents
print('add noise to latents at timestep',A_ )
__UpperCamelCase = self.scheduler.add_noise(A_,A_,A_ )
__UpperCamelCase = init_latents
return latents
@torch.no_grad()
def __call__( self: Any,A_: Union[torch.FloatTensor, PIL.Image.Image] = None,A_: float = 0.8,A_: int = 1,A_: Optional[Union[torch.Generator, List[torch.Generator]]] = None,A_: float = 0.0,A_: int = 50,A_: Optional[bool] = None,A_: Optional[str] = "pil",A_: bool = True,):
'''simple docstring'''
self.check_inputs(A_ )
# 2. Preprocess image
__UpperCamelCase = preprocess(A_ )
# 3. set timesteps
self.scheduler.set_timesteps(A_,device=self.device )
__UpperCamelCase, __UpperCamelCase = self.get_timesteps(A_,A_,self.device )
__UpperCamelCase = timesteps[:1].repeat(A_ )
# 4. Prepare latent variables
__UpperCamelCase = self.prepare_latents(A_,A_,A_,self.unet.dtype,self.device,A_ )
__UpperCamelCase = latents
# 5. Denoising loop
for t in self.progress_bar(A_ ):
# 1. predict noise model_output
__UpperCamelCase = self.unet(A_,A_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__UpperCamelCase = self.scheduler.step(
A_,A_,A_,eta=A_,use_clipped_model_output=A_,generator=A_,).prev_sample
__UpperCamelCase = (image / 2 + 0.5).clamp(0,1 )
__UpperCamelCase = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=A_ )
| 1 |
import string
def _A ( _lowercase ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase = string.ascii_uppercase.find(_lowercase )
__UpperCamelCase = num - key
if num < 0:
__UpperCamelCase = num + len(string.ascii_uppercase )
__UpperCamelCase = translated + string.ascii_uppercase[num]
else:
__UpperCamelCase = translated + symbol
print(f'''Decryption using Key #{key}: {translated}''' )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = input('Encrypted message: ' )
__UpperCamelCase = message.upper()
decrypt(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __lowerCamelCase (_a , _a ):
@register_to_config
def __init__( self: List[Any],A_: int = 768,):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.zeros(1,A_ ) )
__UpperCamelCase = nn.Parameter(torch.ones(1,A_ ) )
def snake_case_ ( self: Tuple,A_: Optional[Union[str, torch.device]] = None,A_: Optional[torch.dtype] = None,):
'''simple docstring'''
__UpperCamelCase = nn.Parameter(self.mean.to(A_ ).to(A_ ) )
__UpperCamelCase = nn.Parameter(self.std.to(A_ ).to(A_ ) )
return self
def snake_case_ ( self: Any,A_: Union[str, Any] ):
'''simple docstring'''
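        # Standardize the embeddings with the learned per-dimension mean/std;
        # the method below applies the inverse transform.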
__UpperCamelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def snake_case_ ( self: Optional[Any],A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = (embeds * self.std) + self.mean
return embeds
| 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = KandinskyInpaintPipeline
_lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowercase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowercase = False
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        __UpperCamelCase = UNet2DConditionModel(**A_ )
return model
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,)
__UpperCamelCase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0,2,3,1 )[0]
        __UpperCamelCase = Image.fromarray(np.uint8(A_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
        __UpperCamelCase = np.ones((64, 64),dtype=np.float32 )
__UpperCamelCase = 0
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ),return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__UpperCamelCase = np.ones((768, 768),dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = 'a hat'
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase, __UpperCamelCase = pipe_prior(
A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple()
__UpperCamelCase = pipeline(
A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',)
__UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_,A_ )
| 1 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__snake_case = parser.parse_args()
__snake_case = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)  # argparse stores the flag under the dest name txt2img_unclip
__snake_case = CLIPImageProcessor()
__snake_case = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__snake_case = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
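# A minimal invocation sketch; the script filename and dump path below are
# placeholder assumptions, while the flags match the parser defined above:
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation
#
# The saved folder can then be reloaded with
# UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variation").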
| 1 |
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
'''simple docstring'''
return F'''Node({self.data})'''
class __lowerCamelCase :
def __init__( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = None
def __iter__( self: int ):
'''simple docstring'''
__UpperCamelCase = self.head
while node:
yield node.data
__UpperCamelCase = node.next
def __len__( self: List[str] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self: Any ):
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __getitem__( self: int,A_: int ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: int,A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__UpperCamelCase = self.head
for _ in range(A_ ):
__UpperCamelCase = current.next
__UpperCamelCase = data
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
self.insert_nth(len(self ),A_ )
def snake_case_ ( self: List[Any],A_: Any ):
'''simple docstring'''
self.insert_nth(0,A_ )
def snake_case_ ( self: Optional[Any],A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__UpperCamelCase = Node(A_ )
if self.head is None:
__UpperCamelCase = new_node
elif index == 0:
__UpperCamelCase = self.head # link new_node to head
__UpperCamelCase = new_node
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = new_node
def snake_case_ ( self: str ): # print every node data
'''simple docstring'''
print(self )
def snake_case_ ( self: int ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case_ ( self: str ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case_ ( self: Any,A_: int = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__UpperCamelCase = self.head # default first node
if index == 0:
__UpperCamelCase = self.head.next
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next.next
return delete_node.data
def snake_case_ ( self: Any ):
'''simple docstring'''
return self.head is None
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = self.head
while current:
# Store the current node's next node.
__UpperCamelCase = current.next
# Make the current node's next point backwards
__UpperCamelCase = prev
# Make the previous node be the current node
__UpperCamelCase = current
# Make the current node the next node (to progress iteration)
__UpperCamelCase = next_node
# Return prev in order to put the head at the end
__UpperCamelCase = prev
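# Pointer-rewiring sketch for the reversal above, on the list 1->2->3:
#   start:        prev=None, current=1->2->3
#   after step 1: None<-1,    current=2->3
#   after step 2: None<-1<-2, current=3
#   after step 3: None<-1<-2<-3, the loop ends and head becomes the old tail (3)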
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> List[str]:
"""simple docstring"""
from doctest import testmod
testmod()
__UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
__UpperCamelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
| 1 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __lowerCamelCase (_a ):
_lowercase = """"""
_lowercase = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self: Tuple,A_: Optional[DatasetInfo] = None,A_: Optional[str] = None,**A_: List[str],):
'''simple docstring'''
super().__init__(self,**A_ )
__UpperCamelCase = repo_info
__UpperCamelCase = token
__UpperCamelCase = None
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
if self.dir_cache is None:
__UpperCamelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__UpperCamelCase = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(A_ ): {'name': str(A_ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def snake_case_ ( self: Any,A_: str,A_: str = "rb",**A_: str,):
'''simple docstring'''
if not isinstance(self.repo_info,A_ ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__UpperCamelCase = hf_hub_url(self.repo_info.id,A_,revision=self.repo_info.sha )
return fsspec.open(
A_,mode=A_,headers=get_authentication_headers_for_url(A_,use_auth_token=self.token ),client_kwargs={'trust_env': True},).open()
def snake_case_ ( self: int,A_: Optional[int],**A_: Any ):
'''simple docstring'''
self._get_dirs()
__UpperCamelCase = self._strip_protocol(A_ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A_ )
def snake_case_ ( self: List[Any],A_: Union[str, Any],A_: Any=False,**A_: Any ):
'''simple docstring'''
self._get_dirs()
__UpperCamelCase = PurePosixPath(path.strip('/' ) )
__UpperCamelCase = {}
for p, f in self.dir_cache.items():
__UpperCamelCase = PurePosixPath(p.strip('/' ) )
__UpperCamelCase = p.parent
if root == path:
__UpperCamelCase = f
__UpperCamelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
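# Usage sketch. The filesystem class above is name-mangled; assume it is the
# legacy datasets HfFileSystem and that `token` holds a valid Hub token (both
# are assumptions):
#
#   from huggingface_hub import HfApi
#   fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"), token=token)
#   fs.ls("/")                  # top-level files/directories of the dataset repo
#   fs.open("README.md").read()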
| 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
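# With the _LazyModule wiring above, a plain import of this package stays cheap:
# the torch-backed classes listed in _import_structure are only materialized on
# first attribute access (e.g. `from transformers.models.unispeech import
# UniSpeechModel`), and the try/except guards degrade gracefully when torch is
# not installed.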
| 1 | 1 |
from math import pi, sqrt
def _A ( _lowercase ) -> float:
"""simple docstring"""
if num <= 0:
raise ValueError('math domain error' )
if num > 1_71.5:
raise OverflowError('math range error' )
elif num - int(_lowercase ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(_lowercase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
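# The recurrence above gives gamma(n) == (n - 1)! for positive integers, e.g.
# gamma(5) == 4 * 3 * 2 * 1 == 24, and for half-integers it bottoms out at
# gamma(0.5) == sqrt(pi), e.g. gamma(3.5) == 2.5 * 1.5 * 0.5 * sqrt(pi).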
def _A ( ) -> None:
"""simple docstring"""
assert gamma(0.5 ) == sqrt(_lowercase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case = 1.0
while num:
__snake_case = float(input('''Gamma of: '''))
print(f"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''')
| 1 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
__snake_case = {value: key for key, value in encode_dict.items()}
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _A ( _lowercase ) -> str:
"""simple docstring"""
if set(_lowercase ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__UpperCamelCase = ''
for word in coded.split():
while len(_lowercase ) != 0:
decoded += decode_dict[word[:5]]
__UpperCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
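# Round-trip sketch. The two functions above are name-mangled to `_A`; the names
# `encode`/`decode` are recovered from their own error messages:
#
#   encoded = encode("hello world")
#   # -> "AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB"
#   assert decode(encoded) == "hello world"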
| 1 | 1 |
import datasets
from .evaluate import evaluate
__snake_case = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
__snake_case = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
__snake_case = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase (datasets.Metric ):
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ),codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],)
def snake_case_ ( self: int,A_: int,A_: int ):
'''simple docstring'''
__UpperCamelCase = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
__UpperCamelCase = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
__UpperCamelCase = evaluate(dataset=A_,predictions=A_ )
return score
| 1 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '08x' )[-8:]
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = B''
for char in message:
bit_string += format(_lowercase , '08b' ).encode('utf-8' )
__UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_lowercase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _A ( _lowercase ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(_lowercase ) % 5_12 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_lowercase ) , 5_12 ):
__UpperCamelCase = bit_string[pos : pos + 5_12]
__UpperCamelCase = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _A ( _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '032b' )
__UpperCamelCase = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_lowercase , 2 )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
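# Note: XOR behaves like OR here because `(i << shift)` and `(i >> (32 - shift))`
# occupy disjoint bit positions before the final reduction modulo 2**32.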
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = preprocess(_lowercase )
__UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__UpperCamelCase = 0X67_45_23_01
__UpperCamelCase = 0Xef_cd_ab_89
__UpperCamelCase = 0X98_ba_dc_fe
__UpperCamelCase = 0X10_32_54_76
__UpperCamelCase = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_lowercase ):
__UpperCamelCase = aa
__UpperCamelCase = ba
__UpperCamelCase = ca
__UpperCamelCase = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__UpperCamelCase = d ^ (b & (c ^ d))
__UpperCamelCase = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__UpperCamelCase = c ^ (d & (b ^ c))
__UpperCamelCase = (5 * i + 1) % 16
elif i <= 47:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = (3 * i + 5) % 16
else:
__UpperCamelCase = c ^ (b | not_aa(_lowercase ))
__UpperCamelCase = (7 * i) % 16
__UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32
__UpperCamelCase = d
__UpperCamelCase = c
__UpperCamelCase = b
__UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) )
# Add hashed chunk to running total
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
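# Sanity-check sketch against the standard library. The top-level digest function
# above is name-mangled to `_A`; `md5_me` is an assumed name for it:
#
#   import hashlib
#   assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")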
| 1 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get hangs in `barrier` calls, you have network issues; you may try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per task
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _A ( *_lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'r' ) as fh:
fcntl.flock(_lowercase , fcntl.LOCK_EX )
try:
print(*_lowercase )
finally:
fcntl.flock(_lowercase , fcntl.LOCK_UN )
__snake_case = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__snake_case = torch.device('''cuda''', local_rank)
__snake_case = socket.gethostname()
__snake_case = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case = dist.get_rank()
__snake_case = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class __lowerCamelCase :
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowerCamelCase :
def __init__( self: Any,A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ )
__UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ )
__UpperCamelCase = [self.start]
__UpperCamelCase = []
__UpperCamelCase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A_ )
self.closed_nodes.append(A_ )
__UpperCamelCase = self.get_successors(A_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A_ )
else:
self.open_nodes.append(A_ )
return [self.start.pos]
def snake_case_ ( self: int,A_: Node ):
'''simple docstring'''
__UpperCamelCase = []
for action in delta:
__UpperCamelCase = parent.pos_x + action[1]
__UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) )
return successors
def snake_case_ ( self: Any,A_: Node | None ):
'''simple docstring'''
__UpperCamelCase = node
__UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCamelCase = current_node.parent
path.reverse()
return path
class __lowerCamelCase :
def __init__( self: List[Any],A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCamelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A_,A_ )
self.fwd_astar.closed_nodes.append(A_ )
self.bwd_astar.closed_nodes.append(A_ )
__UpperCamelCase = current_bwd_node
__UpperCamelCase = current_fwd_node
__UpperCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A_ ),
self.bwd_astar: self.bwd_astar.get_successors(A_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A_ )
else:
astar.open_nodes.append(A_ )
return [self.fwd_astar.start.pos]
def snake_case_ ( self: List[str],A_: Node,A_: Node ):
'''simple docstring'''
__UpperCamelCase = self.fwd_astar.retrace_path(A_ )
__UpperCamelCase = self.bwd_astar.retrace_path(A_ )
bwd_path.pop()
bwd_path.reverse()
__UpperCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = bd_astar.search()  # run the search so the timing below measures real work; `bd_astar` is an assumed name for the object created above
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCamelCase (unittest.TestCase ):
def __init__( self: List[Any],A_: Dict,A_: List[str]=7,A_: Dict=3,A_: int=18,A_: Optional[Any]=30,A_: Dict=400,A_: int=True,A_: Tuple=None,A_: Any=True,):
'''simple docstring'''
__UpperCamelCase = size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = apply_ocr
def snake_case_ ( self: Dict ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = LayoutLMvaImageProcessingTester(self )
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_,'do_resize' ) )
self.assertTrue(hasattr(A_,'size' ) )
self.assertTrue(hasattr(A_,'apply_ocr' ) )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{'height': 18, 'width': 18} )
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict,size=42 )
self.assertEqual(image_processor.size,{'height': 42, 'width': 42} )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_,Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
self.assertIsInstance(encoding.words,A_ )
self.assertIsInstance(encoding.boxes,A_ )
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_docvqa',split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase = image_processing(A_,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ),len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words,A_ )
self.assertListEqual(encoding.boxes,A_ )
# with apply_OCR = False
__UpperCamelCase = LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase = image_processing(A_,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape,(1, 3, 224, 224) )
| 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This checks that we actually called the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
__snake_case = {
'''yjernite/retribert-base-uncased''': 5_1_2,
}
__snake_case = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = PRETRAINED_INIT_CONFIGURATION
_lowercase = RetriBertTokenizer
_lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self: Optional[int],A_: int=None,A_: Optional[Any]=None,A_: List[Any]=True,A_: Any="[UNK]",A_: List[Any]="[SEP]",A_: Optional[Any]="[PAD]",A_: Dict="[CLS]",A_: Union[str, Any]="[MASK]",A_: Optional[Any]=True,A_: Dict=None,**A_: Dict,):
'''simple docstring'''
super().__init__(
A_,tokenizer_file=A_,do_lower_case=A_,unk_token=A_,sep_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,tokenize_chinese_chars=A_,strip_accents=A_,**A_,)
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase',A_ ) != do_lower_case
or normalizer_state.get('strip_accents',A_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars',A_ ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(A_,normalizer_state.pop('type' ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**A_ )
__UpperCamelCase = do_lower_case
def snake_case_ ( self: List[str],A_: Tuple,A_: List[Any]=None ):
'''simple docstring'''
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self: Optional[int],A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self: str,A_: str,A_: Optional[str] = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(A_,name=A_ )
return tuple(A_ )
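# Layout produced by the two methods above (BERT-style):
#   single sequence: [CLS] A [SEP]          -> token_type_ids are all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0 for the first segment, 1 for the second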
| 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 1_6
__snake_case = 3_2
def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCamelCase = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels', which is the name the models of the
# transformers library expect for the labels
__UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
__UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = args.model_name_or_path
set_seed(_lowercase )
__UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
__UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
__UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__UpperCamelCase = 1
__UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
__UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order
# we gave them to the prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__UpperCamelCase = 0
# Now we train the model
__UpperCamelCase = evaluate.load('glue' , 'mrpc' )
__UpperCamelCase = 0
__UpperCamelCase = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
__UpperCamelCase, __UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
__UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
__UpperCamelCase = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
__UpperCamelCase = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
def _A ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser(description='Simple example of a training script that tracks model performance.' )
parser.add_argument(
'--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, training will throw an error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 1 | 1 |
import torch
from diffusers import StableDiffusionPipeline
__snake_case = '''path-to-your-trained-model'''
__snake_case = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
__snake_case = '''A photo of sks dog in a bucket'''
__snake_case = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase (_a ):
@slow
@require_torch
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
__UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase = bertabert.config.encoder.vocab_size
__UpperCamelCase = tokenizer.sep_token_id
__UpperCamelCase = tokenizer.cls_token_id
__UpperCamelCase = 128
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
__UpperCamelCase = train_dataset.select(range(32 ) )
__UpperCamelCase = val_dataset.select(range(16 ) )
__UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 )
__UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 )
__UpperCamelCase = inputs.input_ids
__UpperCamelCase = inputs.attention_mask
__UpperCamelCase = outputs.input_ids
__UpperCamelCase = outputs.input_ids.copy()
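# replace padding token ids in the labels with -100 so they are ignored by the cross-entropy loss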
__UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__UpperCamelCase = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_: str ):
__UpperCamelCase = pred.label_ids
__UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
__UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,)
# instantiate trainer
__UpperCamelCase = SeqaSeqTrainer(
model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,)
# start training
trainer.train()
| 1 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __lowerCamelCase (_a ):
_lowercase = """fnet"""
def __init__( self: Optional[Any],A_: str=3_2000,A_: Optional[Any]=768,A_: str=12,A_: List[str]=3072,A_: Union[str, Any]="gelu_new",A_: Optional[int]=0.1,A_: List[str]=512,A_: Optional[Any]=4,A_: Optional[int]=0.0_2,A_: Optional[Any]=1E-12,A_: int=False,A_: Any=512,A_: Optional[Any]=3,A_: List[Any]=1,A_: Tuple=2,**A_: Any,):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = type_vocab_size
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = use_tpu_fourier_optimizations
__UpperCamelCase = tpu_short_seq_length
| 1 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """CLIPImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: Tuple,A_: Dict=None,A_: Dict=None,**A_: str ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: Optional[int],A_: Union[str, Any]=None,A_: int=None,A_: List[Any]=None,**A_: Optional[Any] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
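# tokenize the text and/or preprocess the images; when both are given, the image pixel values are merged into the text encoding below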
if text is not None:
__UpperCamelCase = self.tokenizer(A_,return_tensors=A_,**A_ )
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: Dict,*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.tokenizer.model_input_names
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
| 1 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , key=lambda _lowercase : _lowercase[column] )
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> List[Any]:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> Tuple:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , _lowercase ):
for j in range(max(0 , i - 6 ) , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(_lowercase , _lowercase )
# recursion
__UpperCamelCase = points_counts // 2
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[:mid] , _lowercase )
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCamelCase = min(_lowercase , _lowercase )
__UpperCamelCase = []
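# keep only the points within closest_pair_dis of the vertical dividing line; only these can form a closer cross-boundary pair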
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowercase )
__UpperCamelCase = dis_between_closest_in_strip(
_lowercase , len(_lowercase ) , _lowercase )
return min(_lowercase , _lowercase )
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = column_based_sort(_lowercase , column=0 )
__UpperCamelCase = column_based_sort(_lowercase , column=1 )
return (
closest_pair_of_points_sqr(
_lowercase , _lowercase , _lowercase )
) ** 0.5
if __name__ == "__main__":
__snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 1 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _A ( _lowercase ) -> str:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = tmp_path_factory.getbasetemp() / 'cache'
__UpperCamelCase = test_hf_cache_home / 'datasets'
__UpperCamelCase = test_hf_cache_home / 'metrics'
__UpperCamelCase = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_lowercase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_lowercase ) )
__UpperCamelCase = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_lowercase ) )
@pytest.fixture(autouse=_lowercase , scope='session' )
def _A ( ) -> Dict:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_lowercase )
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _lowercase )
@pytest.fixture
def _A ( _lowercase ) -> Any:
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _lowercase )
| 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCamelCase (_a ):
_lowercase = """bert"""
def __init__( self: Any,A_: Dict=3_0522,A_: Optional[Any]=768,A_: Union[str, Any]=12,A_: List[Any]=12,A_: Optional[int]=3072,A_: Union[str, Any]="gelu",A_: List[str]=0.1,A_: Dict=0.1,A_: Optional[int]=512,A_: Optional[Any]=2,A_: Union[str, Any]=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=0,A_: List[Any]="absolute",A_: str=True,A_: Union[str, Any]=None,**A_: int,):
'''simple docstring'''
super().__init__(pad_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 1 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = VideoToVideoSDPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = 'np'
__UpperCamelCase = sd_pipe(**A_ ).frames
__UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ )
__UpperCamelCase = video.to('cuda' )
__UpperCamelCase = 'Spiderman is surfing'
__UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames
__UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
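# treat the title as a base-26 number with A=1 ... Z=26, e.g. "AB" -> 1 * 26 + 2 = 28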
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(26 , _lowercase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """lxmert"""
_lowercase = {}
def __init__( self: Dict,A_: Optional[int]=3_0522,A_: List[Any]=768,A_: int=12,A_: Optional[int]=9500,A_: Optional[int]=1600,A_: List[str]=400,A_: Dict=3072,A_: Tuple="gelu",A_: Optional[int]=0.1,A_: str=0.1,A_: Optional[Any]=512,A_: List[Any]=2,A_: List[str]=0.0_2,A_: List[str]=1E-12,A_: Union[str, Any]=9,A_: Optional[int]=5,A_: Any=5,A_: Optional[Any]=2048,A_: Union[str, Any]=4,A_: Any=6.6_7,A_: Tuple=True,A_: List[str]=True,A_: Dict=True,A_: List[Any]=True,A_: List[str]=True,A_: Optional[Any]=True,A_: Optional[Any]=True,**A_: Dict,):
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = num_qa_labels
__UpperCamelCase = num_object_labels
__UpperCamelCase = num_attr_labels
__UpperCamelCase = l_layers
__UpperCamelCase = x_layers
__UpperCamelCase = r_layers
__UpperCamelCase = visual_feat_dim
__UpperCamelCase = visual_pos_dim
__UpperCamelCase = visual_loss_normalizer
__UpperCamelCase = task_matched
__UpperCamelCase = task_mask_lm
__UpperCamelCase = task_obj_predict
__UpperCamelCase = task_qa
__UpperCamelCase = visual_obj_loss
__UpperCamelCase = visual_attr_loss
__UpperCamelCase = visual_feat_loss
__UpperCamelCase = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**A_ )
| 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
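# the key bias is kept at zero in the original implementation, so zeros are inserted for the k slot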
__UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) )
__UpperCamelCase = qkv_bias
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = 3_64 if 'coco' in model_name else 2_24
__UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict()
elif "opt-6.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict()
elif "t5-xl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase )
return config, image_size
@torch.no_grad()
def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
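# id of the newline token, used below as the eos_token_id for generation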
__UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0]
__UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase )
__UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval()
__UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase )
assert len(_lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCamelCase = load_demo_image()
__UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
__UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase )
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
__UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase )
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowercase , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "opt" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__UpperCamelCase = hf_model(_lowercase , _lowercase ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase )
else:
# cast to same type
__UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__UpperCamelCase = ''
__UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase )
__UpperCamelCase = original_model.generate({'image': original_pixel_values} )
__UpperCamelCase = hf_model.generate(
_lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowercase )
__UpperCamelCase = input_ids.shape[1]
__UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__UpperCamelCase = import_module('tasks' )
try:
__UpperCamelCase = getattr(_lowercase , model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase = dict(enumerate(_lowercase ) )
__UpperCamelCase = len(_lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_lowercase , _lowercase ) -> Tuple[List[int], List[int]]:
__UpperCamelCase = np.argmax(_lowercase , axis=2 )
__UpperCamelCase, __UpperCamelCase = preds.shape
__UpperCamelCase = [[] for _ in range(_lowercase )]
__UpperCamelCase = [[] for _ in range(_lowercase )]
for i in range(_lowercase ):
for j in range(_lowercase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase, __UpperCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowercase , _lowercase ),
"precision": precision_score(_lowercase , _lowercase ),
"recall": recall_score(_lowercase , _lowercase ),
"f1": fa_score(_lowercase , _lowercase ),
}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
# Predict
if training_args.do_predict:
__UpperCamelCase = TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = trainer.predict(_lowercase )
__UpperCamelCase, __UpperCamelCase = align_predictions(_lowercase , _lowercase )
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase )
return results
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 1 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _A ( _lowercase , _lowercase=() , _lowercase=None , _lowercase="no" , _lowercase="29500" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = False
__UpperCamelCase = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
__UpperCamelCase = True
elif "IPython" in sys.modules:
__UpperCamelCase = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
__UpperCamelCase = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _lowercase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cell initializes an '
'`Accelerator`.' )
if num_processes is None:
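# a Colab/Kaggle TPU exposes 8 cores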
__UpperCamelCase = 8
__UpperCamelCase = PrepareForLaunch(_lowercase , distributed_type='TPU' )
print(f'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(_lowercase , args=_lowercase , nprocs=_lowercase , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*_lowercase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cell initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=_lowercase , master_addr='127.0.0.1' , master_port=_lowercase , mixed_precision=_lowercase ):
__UpperCamelCase = PrepareForLaunch(_lowercase , distributed_type='MULTI_GPU' )
print(f'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(_lowercase , args=_lowercase , nprocs=_lowercase , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__UpperCamelCase = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*_lowercase )
def _A ( _lowercase , _lowercase=() , _lowercase=2 ) -> Dict:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=_lowercase , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
__UpperCamelCase = PrepareForLaunch(_lowercase , debug=_lowercase )
start_processes(_lowercase , args=_lowercase , nprocs=_lowercase , start_method='fork' )
| 1 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have some network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _A ( *_lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'r' ) as fh:
fcntl.flock(_lowercase , fcntl.LOCK_EX )
try:
print(*_lowercase )
finally:
fcntl.flock(_lowercase , fcntl.LOCK_UN )
__snake_case = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__snake_case = torch.device('''cuda''', local_rank)
__snake_case = socket.gethostname()
__snake_case = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case = dist.get_rank()
__snake_case = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """xlnet"""
_lowercase = ["""mems"""]
_lowercase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self: Tuple,A_: Optional[int]=3_2000,A_: Tuple=1024,A_: List[Any]=24,A_: Union[str, Any]=16,A_: Union[str, Any]=4096,A_: Dict="gelu",A_: Dict=True,A_: str="bi",A_: Dict=0.0_2,A_: Optional[int]=1E-12,A_: Optional[Any]=0.1,A_: Union[str, Any]=512,A_: str=None,A_: Optional[Any]=True,A_: Union[str, Any]=False,A_: int=False,A_: List[str]=-1,A_: Any=False,A_: Dict="last",A_: int=True,A_: List[str]="tanh",A_: Optional[int]=0.1,A_: int=5,A_: Any=5,A_: Optional[int]=5,A_: Tuple=1,A_: List[str]=2,**A_: List[Any],):
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
__UpperCamelCase = d_model // n_head
__UpperCamelCase = ff_activation
__UpperCamelCase = d_inner
__UpperCamelCase = untie_r
__UpperCamelCase = attn_type
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = dropout
__UpperCamelCase = mem_len
__UpperCamelCase = reuse_len
__UpperCamelCase = bi_data
__UpperCamelCase = clamp_len
__UpperCamelCase = same_length
__UpperCamelCase = summary_type
__UpperCamelCase = summary_use_proj
__UpperCamelCase = summary_activation
__UpperCamelCase = summary_last_dropout
__UpperCamelCase = start_n_top
__UpperCamelCase = end_n_top
__UpperCamelCase = bos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.',A_,)
__UpperCamelCase = kwargs['use_cache']
__UpperCamelCase = use_mems_eval
__UpperCamelCase = use_mems_train
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case_ ( self: int,A_: List[Any] ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 1 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _A ( _lowercase ) -> str:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def _A ( tmp_path_factory , monkeypatch ) -> None:
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def _A ( ) -> Dict:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def _A ( monkeypatch ) -> None:
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def _A ( monkeypatch ) -> None:
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
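A sketch of what the autouse cache fixture guarantees in practice (hypothetical test module; the fixture has already redirected every HF cache path into pytest's temp directory before the test body runs):
import datasets.config
def test_caches_are_sandboxed(tmp_path_factory):
    base = str(tmp_path_factory.getbasetemp())
    # Both paths were monkeypatched by the autouse fixture above.
    assert str(datasets.config.HF_DATASETS_CACHE).startswith(base)
    assert str(datasets.config.HF_MODULES_CACHE).startswith(base)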
| 1 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__snake_case = logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name ):
    """simple docstring"""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ) -> Dict:
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class AutoFeatureExtractor:
def __init__( self: Union[str, Any] ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls,pretrained_model_name_or_path,**kwargs ):
        '''simple docstring'''
        config = kwargs.pop('config',None )
        trust_remote_code = kwargs.pop('trust_remote_code',None )
        kwargs['_from_auto'] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path,**kwargs )
        feature_extractor_class = config_dict.get('feature_extractor_type',None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map',{} ):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config,PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path,**kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config,'feature_extractor_type',None )
            if hasattr(config,'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code,pretrained_model_name_or_path,has_local_code,has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map,pretrained_model_name_or_path,**kwargs )
            _ = kwargs.pop('code_revision',None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict,**kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict,**kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict,**kwargs )
        raise ValueError(
            F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register( config_class,feature_extractor_class ):
        '''simple docstring'''
        FEATURE_EXTRACTOR_MAPPING.register(config_class,feature_extractor_class )
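For reference, the resolution logic above backs the usual one-liner entry point; a short sketch against the public transformers API (the checkpoint is only an example):
from transformers import AutoFeatureExtractor
# Resolves to Wav2Vec2FeatureExtractor via the model type mapping above.
extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
inputs = extractor([0.0] * 16000, sampling_rate=16000, return_tensors='pt')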
| 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self: List[str] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=False,set_alpha_to_one=False,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs( self: Union[str, Any],device,seed=0 ):
        '''simple docstring'''
        video = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def snake_case_ ( self: Union[str, Any] ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
    def snake_case_ ( self: Tuple ):
        '''simple docstring'''
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576),generator=generator )
        video = video.to('cuda' )
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt,video=video,generator=generator,num_inference_steps=3,output_type='pt' ).frames
        expected_array = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
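Outside the test harness, the pipeline exercised by the slow test is driven the same way; a minimal sketch (checkpoint, frame shape and step count mirror the test above and are illustrative):
import torch
from diffusers import VideoToVideoSDPipeline
pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
video = torch.randn((1, 10, 3, 1024, 576)).to('cuda')  # stand-in for real source frames
frames = pipe('Spiderman is surfing', video=video, num_inference_steps=25, output_type='pt').frames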
| 1 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = '''Usage of script: script_name <size_of_canvas:int>'''
choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def create_canvas( size ) -> list[list[bool]]:
    """simple docstring"""
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed( canvas ) -> None:
    """simple docstring"""
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run( canvas ) -> list[list[bool]]:
    """simple docstring"""
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point( pt , neighbours ) -> bool:
    """simple docstring"""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
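The update rule can be sanity-checked headlessly on the classic blinker oscillator, which alternates between a horizontal and a vertical bar of three cells (a small sketch built on the functions above):
blinker = [[False] * 5 for _ in range(5)]
for j in (1, 2, 3):
    blinker[2][j] = True  # horizontal bar in the middle row
next_gen = run(blinker)
assert [row[2] for row in next_gen[1:4]] == [True, True, True]  # now a vertical bar
assert not next_gen[2][1] and not next_gen[2][3]                # old arms have died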
| 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
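Invocation and downstream use of the converted checkpoint look like this (the script filename and paths are illustrative):
# python convert_unclip_txt2img_to_image_variation.py \
#     --dump_path ./karlo-image-variations --txt2img_unclip kakaobrain/karlo-v1-alpha
from diffusers import UnCLIPImageVariationPipeline
pipe = UnCLIPImageVariationPipeline.from_pretrained('./karlo-image-variations')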
| 1 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowerCamelCase (unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self: Tuple,model: List[str],tokenizer: str,processor: str ):
        '''simple docstring'''
        classifier = ZeroShotClassificationPipeline(
            model=model,tokenizer=tokenizer,candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self: Optional[Any],classifier: Tuple,_: List[Any] ):
        '''simple docstring'''
        outputs = classifier('Who are you voting for in 2020?',candidate_labels='politics' )
        self.assertEqual(outputs,{'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # No kwarg
        outputs = classifier('Who are you voting for in 2020?',['politics'] )
        self.assertEqual(outputs,{'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?',candidate_labels=['politics'] )
        self.assertEqual(outputs,{'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?',candidate_labels='politics, public health' )
        self.assertEqual(
            outputs,{'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ),1.0 )
        outputs = classifier('Who are you voting for in 2020?',candidate_labels=['politics', 'public health'] )
        self.assertEqual(
            outputs,{'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ),1.0 )
        outputs = classifier(
            'Who are you voting for in 2020?',candidate_labels='politics',hypothesis_template='This text is about {}' )
        self.assertEqual(outputs,{'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'],['positive', 'negative'] )
        self.assertEqual(
            outputs,[
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ],)
        outputs = classifier(['I am happy', 'I am sad'],['positive', 'negative'] )
        self.assertEqual(
            outputs,[
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ],)
        with self.assertRaises(ValueError ):
            classifier('',candidate_labels='politics' )
        with self.assertRaises(TypeError ):
            classifier(None,candidate_labels='politics' )
        with self.assertRaises(ValueError ):
            classifier('Who are you voting for in 2020?',candidate_labels='' )
        with self.assertRaises(TypeError ):
            classifier('Who are you voting for in 2020?',candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                'Who are you voting for in 2020?',candidate_labels='politics',hypothesis_template='Not formatting template',)
        with self.assertRaises(AttributeError ):
            classifier(
                'Who are you voting for in 2020?',candidate_labels='politics',hypothesis_template=None,)
        self.run_entailment_id(classifier )
    def run_entailment_id( self: List[str],zero_shot_classifier: Pipeline ):
        '''simple docstring'''
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id,-1 )
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id,0 )
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id,0 )
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id,2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment,zero_shot_classifier.entailment_id )
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
        zero_shot_classifier = pipeline(
'zero-shot-classification',model='sshleifer/tiny-distilbert-base-cased-distilled-squad',framework='pt',)
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100,candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def snake_case_ ( self: str ):
'''simple docstring'''
        zero_shot_classifier = pipeline(
            'zero-shot-classification',model='sshleifer/tiny-distilbert-base-cased-distilled-squad',framework='pt',)
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ),{
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
},)
@require_tf
def snake_case_ ( self: List[str] ):
'''simple docstring'''
        zero_shot_classifier = pipeline(
            'zero-shot-classification',model='sshleifer/tiny-distilbert-base-cased-distilled-squad',framework='tf',)
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ),{
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
},)
@slow
@require_torch
def snake_case_ ( self: Tuple ):
'''simple docstring'''
        zero_shot_classifier = pipeline('zero-shot-classification',model='roberta-large-mnli',framework='pt' )
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ),{
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
},)
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.',candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],multi_label=True,)
        self.assertEqual(
            nested_simplify(outputs ),{
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
},)
@slow
@require_tf
def snake_case_ ( self: Tuple ):
'''simple docstring'''
        zero_shot_classifier = pipeline('zero-shot-classification',model='roberta-large-mnli',framework='tf' )
        outputs = zero_shot_classifier(
'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ),{
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
},)
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.',candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],multi_label=True,)
        self.assertEqual(
            nested_simplify(outputs ),{
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
},)
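The behaviour pinned down by these tests is easiest to see through the public API; a short sketch (the MNLI checkpoint is one common choice):
from transformers import pipeline
classifier = pipeline('zero-shot-classification', model='roberta-large-mnli')
result = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
print(result['labels'][0])  # highest-scoring label; scores sum to 1 unless multi_label=True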
| 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
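The `_LazyModule` indirection keeps the top-level import cheap: heavy submodules are only imported when one of the names in `_import_structure` is first accessed. A sketch of the observable behaviour:
from transformers.models import autoformer
# Nothing heavy has been imported yet; attribute access triggers the real import.
config_cls = autoformer.AutoformerConfig   # loads configuration_autoformer on demand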
| 1 | 1 |
def solution( n: int = 10_00 ) -> int:
    """simple docstring"""
    a = 3
    result = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
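The same total has a closed form by inclusion-exclusion: the multiples of k below n sum to k*m*(m+1)/2 with m = (n-1)//k, and multiples of 15 would otherwise be counted twice. A quick cross-check of the loop above:
def solution_closed_form(n: int = 10_00) -> int:
    def sum_multiples(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return sum_multiples(3) + sum_multiples(5) - sum_multiples(15)
assert solution_closed_form() == solution() == 233168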
| 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend( line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init( ):
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files( backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path , f'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , 'r' , encoding='utf-8' , newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    'to fix this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 1 |
from math import pow, sqrt
def validate( *values ) -> bool:
    """simple docstring"""
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio( molar_mass_1 , molar_mass_2 ) -> float | ValueError:
    """simple docstring"""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
    )
def first_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ) -> float | ValueError:
    """simple docstring"""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ) -> float | ValueError:
    """simple docstring"""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def first_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ) -> float | ValueError:
    """simple docstring"""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ) -> float | ValueError:
    """simple docstring"""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
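A quick numerical check: for helium (4 g/mol) against oxygen (32 g/mol), Graham's law gives an effusion-rate ratio of sqrt(32/4), i.e. helium effuses about 2.83 times faster (assuming the parameter order reconstructed above, lighter gas first):
ratio = effusion_ratio(4.0, 32.0)
assert ratio == round(sqrt(32.0 / 4.0), 6)  # 2.828427
print(ratio)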
| 1 |
import string
def decrypt( message ) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''' )
def main( ) -> None:
    """simple docstring"""
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
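For example, 'HELLO WORLD' Caesar-shifted by 3 becomes 'KHOOR ZRUOG'; feeding that ciphertext to the brute-forcer prints the plaintext on the key-3 line:
decrypt('KHOOR ZRUOG')
# ...
# Decryption using Key #3: HELLO WORLD
# ...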
| 1 | 1 |
import argparse
import json
from tqdm import tqdm
def main( ) -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
    parser.add_argument(
        '--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
    parser.add_argument(
        '--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
    args = parser.parse_args()
    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts ) + '\n' )
if __name__ == "__main__":
    main()
| 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowercase = False
    @property
    def text_embedder_hidden_size( self: int ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self: str ):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_a( self: Tuple ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim( self: Union[str, Any] ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self: Optional[int] ):
        '''simple docstring'''
        return 100
    @property
    def dummy_tokenizer( self: str ):
        '''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer
    @property
    def dummy_text_encoder( self: Any ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self: Any ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self: str ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self: Dict ):
        '''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=False,set_alpha_to_one=False,steps_offset=1,prediction_type='epsilon',thresholding=False,)
        components = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def get_dummy_inputs( self: Tuple,device,seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64),rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0,2,3,1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64),dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def snake_case_ ( self: Any ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ),return_dict=False,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
    def tearDown( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def snake_case_ ( self: Any ):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        mask = np.ones((768, 768),dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = 'a hat'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt,generator=generator,num_inference_steps=5,negative_prompt='',).to_tuple()
        output = pipeline(
            prompt,image=init_image,mask_image=mask,image_embeds=image_emb,negative_image_embeds=zero_image_emb,generator=generator,num_inference_steps=100,height=768,width=768,output_type='np',)
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image,expected_image )
| 1 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase (ProcessorMixin ):
    attributes = ["""image_processor"""]
    image_processor_class = """SamImageProcessor"""
    def __init__( self: Dict,image_processor: Union[str, Any] ):
        '''simple docstring'''
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
    def __call__( self: Optional[Any],images=None,input_points=None,input_labels=None,input_boxes=None,return_tensors: Optional[Union[str, TensorType]] = None,**kwargs,):
        '''simple docstring'''
        encoding_image_processor = self.image_processor(
            images,return_tensors=return_tensors,**kwargs,)
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor['original_sizes']
        if hasattr(original_sizes,'numpy' ): # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,input_labels=input_labels,input_boxes=input_boxes,)
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,original_sizes,input_points=input_points,input_labels=input_labels,input_boxes=input_boxes,return_tensors=return_tensors,)
        return encoding_image_processor
    def _normalize_and_convert( self: Tuple,encoding_image_processor,original_sizes,input_points=None,input_labels=None,input_boxes=None,return_tensors="pt",):
        '''simple docstring'''
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size,point,original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size,point,original_size )
                    for point, original_size in zip(input_points,original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points,input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size,box,original_sizes[0],is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size,box,original_size,is_bounding_box=True )
                    for box, original_size in zip(input_boxes,original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes,1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({'input_boxes': input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points,1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({'input_points': input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels,1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({'input_labels': input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels( self: List[str],input_points,input_labels ):
        '''simple docstring'''
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
                input_labels[i] = np.append(input_labels[i],[self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates( self: Optional[int],target_size: int,coords: np.ndarray,original_size,is_bounding_box=False ):
        '''simple docstring'''
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size,longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1,2,2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1,4 )
        return coords
    def _check_and_preprocess_points( self: Dict,input_points=None,input_labels=None,input_boxes=None,):
        '''simple docstring'''
        if input_points is not None:
            if hasattr(input_points,'numpy' ): # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points,list ) or not isinstance(input_points[0],list ):
                raise ValueError('Input points must be a list of list of floating points.' )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels,'numpy' ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels,list ) or not isinstance(input_labels[0],list ):
                raise ValueError('Input labels must be a list of list integers.' )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes,'numpy' ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes,list )
                or not isinstance(input_boxes[0],list )
                or not isinstance(input_boxes[0][0],list )
            ):
                raise ValueError('Input boxes must be a list of list of list of floating points.' )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names( self: List[str] ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks( self: int,*args: int,**kwargs: Tuple ):
        '''simple docstring'''
        return self.image_processor.post_process_masks(*args,**kwargs )
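End to end, this processor is used through the public SAM classes; a minimal sketch (checkpoint and prompt point are illustrative):
import numpy as np
from PIL import Image
from transformers import SamProcessor
processor = SamProcessor.from_pretrained('facebook/sam-vit-base')
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
inputs = processor(image, input_points=[[[320, 240]]], return_tensors='pt')
# The point is rescaled from the 640x480 original to the model's longest-edge resolution.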
| 1 |
from typing import Any
class Node:
    def __init__( self: int,data: Any ):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self: Any ):
        '''simple docstring'''
        return F'''Node({self.data})'''
class LinkedList:
    def __init__( self: Union[str, Any] ):
        '''simple docstring'''
        self.head = None
    def __iter__( self: int ):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self: List[str] ):
        '''simple docstring'''
        return sum(1 for _ in self )
    def __repr__( self: Any ):
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )
    def __getitem__( self: int,index: int ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self: int,index: int,data: Any ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self: Union[str, Any],data: Any ):
        '''simple docstring'''
        self.insert_nth(len(self ),data )
    def insert_head( self: List[Any],data: Any ):
        '''simple docstring'''
        self.insert_nth(0,data )
    def insert_nth( self: Optional[Any],index: int,data: Any ):
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self: str ): # print every node data
        '''simple docstring'''
        print(self )
    def delete_head( self: int ):
        '''simple docstring'''
        return self.delete_nth(0 )
    def delete_tail( self: str ): # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self: Any,index: int = 0 ):
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1: # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self: Any ):
        '''simple docstring'''
        return self.head is None
    def reverse( self: Optional[int] ):
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> List[str]:
"""simple docstring"""
from doctest import testmod
testmod()
__UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
__UpperCamelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
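A quick usage sketch of the list above (assuming the class is bound as LinkedList, as the test helpers already assume):
linked = LinkedList()
for value in (1, 2, 3):
    linked.insert_tail(value)
print(linked)   # 1->2->3
linked[1] = 99  # __setitem__ walks to index 1 and swaps the payload
linked.reverse()
print(linked)   # 3->99->1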
| 1 | 1 |
__snake_case = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''MobileViTFeatureExtractor''']
__snake_case = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
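The pattern above swaps the package module for a _LazyModule so heavy submodules are imported only on first attribute access, while the TYPE_CHECKING branch keeps static type checkers and IDEs working. A minimal sketch of the same deferred-import idea via PEP 562 (not the actual _LazyModule implementation; names are illustrative):
import importlib

_SUBMODULE_BY_NAME = {"MobileViTModel": ".modeling_mobilevit"}

def __getattr__(name):  # module-level __getattr__, consulted only for missing attributes
    if name in _SUBMODULE_BY_NAME:
        module = importlib.import_module(_SUBMODULE_BY_NAME[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")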
| 1 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
__snake_case = {value: key for key, value in encode_dict.items()}
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _A ( _lowercase ) -> str:
"""simple docstring"""
if set(_lowercase ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__UpperCamelCase = ''
for word in coded.split():
while len(_lowercase ) != 0:
decoded += decode_dict[word[:5]]
__UpperCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
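A round-trip sketch of the Baconian cipher (assuming the two helpers above are bound as encode/decode, as their own error messages suggest):
coded = encode("flee at once")  # five A/B symbols per letter, words separated by spaces
assert decode(coded) == "flee at once"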
| 1 | 1 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCamelCase (_a , _a , unittest.TestCase ):
    _lowercase = IFImg2ImgSuperResolutionPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def snake_case_ ( self: Any ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def snake_case_ ( self: List[str],A_: int,A_: Union[str, Any]=0 ):
'''simple docstring'''
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = floats_tensor((1, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, 3, 16, 16),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: int ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda',reason='float16 requires CUDA' )
def snake_case_ ( self: str ):
'''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case_ ( self: int ):
'''simple docstring'''
self._test_save_load_local()
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2,)
| 1 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '08x' )[-8:]
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = B''
for char in message:
bit_string += format(_lowercase , '08b' ).encode('utf-8' )
__UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_lowercase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _A ( _lowercase ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(_lowercase ) % 5_12 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_lowercase ) , 5_12 ):
__UpperCamelCase = bit_string[pos : pos + 5_12]
__UpperCamelCase = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _A ( _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '032b' )
__UpperCamelCase = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_lowercase , 2 )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = preprocess(_lowercase )
__UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
    __UpperCamelCase = 0x6745_2301
    __UpperCamelCase = 0xEFCD_AB89
    __UpperCamelCase = 0x98BA_DCFE
    __UpperCamelCase = 0x1032_5476
__UpperCamelCase = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_lowercase ):
__UpperCamelCase = aa
__UpperCamelCase = ba
__UpperCamelCase = ca
__UpperCamelCase = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__UpperCamelCase = d ^ (b & (c ^ d))
__UpperCamelCase = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__UpperCamelCase = c ^ (d & (b ^ c))
__UpperCamelCase = (5 * i + 1) % 16
elif i <= 47:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = (3 * i + 5) % 16
else:
__UpperCamelCase = c ^ (b | not_aa(_lowercase ))
__UpperCamelCase = (7 * i) % 16
__UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32
__UpperCamelCase = d
__UpperCamelCase = c
__UpperCamelCase = b
__UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) )
# Add hashed chunk to running total
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
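A sanity-check sketch against the standard library (assuming the top-level digest routine above is bound as md5_me; the repeated _A placeholders stand in for the real helper names):
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")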
| 1 | 1 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__snake_case = {'''UserAgent''': UserAgent().random}
def _A ( _lowercase ) -> dict:
"""simple docstring"""
__UpperCamelCase = script.contents[0]
__UpperCamelCase = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __lowerCamelCase :
def __init__( self: Tuple,A_: int ):
'''simple docstring'''
__UpperCamelCase = F'''https://www.instagram.com/{username}/'''
__UpperCamelCase = self.get_json()
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = requests.get(self.url,headers=A_ ).text
__UpperCamelCase = BeautifulSoup(A_,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: List[Any] ):
'''simple docstring'''
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: str ):
'''simple docstring'''
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return self.user_data["username"]
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
return self.user_data["biography"]
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return self.user_data["is_private"]
def _A ( _lowercase = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__UpperCamelCase = InstagramUser(_lowercase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowercase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = InstagramUser('''github''')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class __lowerCamelCase :
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowerCamelCase :
def __init__( self: Any,A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ )
__UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ )
__UpperCamelCase = [self.start]
__UpperCamelCase = []
__UpperCamelCase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A_ )
self.closed_nodes.append(A_ )
__UpperCamelCase = self.get_successors(A_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A_ )
else:
self.open_nodes.append(A_ )
return [self.start.pos]
def snake_case_ ( self: int,A_: Node ):
'''simple docstring'''
__UpperCamelCase = []
for action in delta:
__UpperCamelCase = parent.pos_x + action[1]
__UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) )
return successors
def snake_case_ ( self: Any,A_: Node | None ):
'''simple docstring'''
__UpperCamelCase = node
__UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCamelCase = current_node.parent
path.reverse()
return path
class __lowerCamelCase :
def __init__( self: List[Any],A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCamelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A_,A_ )
self.fwd_astar.closed_nodes.append(A_ )
self.bwd_astar.closed_nodes.append(A_ )
__UpperCamelCase = current_bwd_node
__UpperCamelCase = current_fwd_node
__UpperCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A_ ),
self.bwd_astar: self.bwd_astar.get_successors(A_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A_ )
else:
astar.open_nodes.append(A_ )
return [self.fwd_astar.start.pos]
def snake_case_ ( self: List[str],A_: Node,A_: Node ):
'''simple docstring'''
__UpperCamelCase = self.fwd_astar.retrace_path(A_ )
__UpperCamelCase = self.bwd_astar.retrace_path(A_ )
bwd_path.pop()
bwd_path.reverse()
__UpperCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , _lowercase )
__UpperCamelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__UpperCamelCase = dataset_size < in_memory_max_size
else:
__UpperCamelCase = False
__UpperCamelCase = is_small_dataset(_lowercase )
assert result == expected
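The behavior this test pins down fits in one line; a sketch of the expected semantics (not the library source):
def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    # True only when both values are truthy and the dataset fits under the cap.
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)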
| 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
            __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # This checks that the fake head request was indeed called
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
        __UpperCamelCase = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase (_a ):
def __init__( self: List[str],*A_: Dict,**A_: Tuple ):
'''simple docstring'''
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.',A_,)
super().__init__(*A_,**A_ )
| 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 1_6
__snake_case = 3_2
def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCamelCase = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
__UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = args.model_name_or_path
set_seed(_lowercase )
__UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
__UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
__UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__UpperCamelCase = 1
__UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
__UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__UpperCamelCase = 0
# Now we train the model
__UpperCamelCase = evaluate.load('glue' , 'mrpc' )
__UpperCamelCase = 0
__UpperCamelCase = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__UpperCamelCase, __UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
__UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
__UpperCamelCase = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
__UpperCamelCase = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
def _A ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
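The scheduler step arithmetic used above, with illustrative numbers (a sketch, not part of the script):
batches_per_epoch = 1_000
num_epochs = 3
gradient_accumulation_steps = 4
max_training_steps = (batches_per_epoch * num_epochs) // gradient_accumulation_steps  # 750 optimizer updates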
| 1 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''spiece.model'''}
__snake_case = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
__snake_case = {
'''google/pegasus-xsum''': 5_1_2,
}
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self: List[Any],A_: Dict,A_: Optional[int]="<pad>",A_: Tuple="</s>",A_: int="<unk>",A_: Tuple="<mask_2>",A_: Optional[int]="<mask_1>",A_: str=None,A_: List[Any]=103,A_: Optional[Dict[str, Any]] = None,**A_: Dict,):
'''simple docstring'''
__UpperCamelCase = offset
if additional_special_tokens is not None:
if not isinstance(A_,A_ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(A_ )}, but is'''
F''' {type(A_ )}''' )
__UpperCamelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(A_ ),self.offset - 1 )
]
if len(set(A_ ) ) != len(A_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
__UpperCamelCase = additional_special_tokens_extended
else:
__UpperCamelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2,self.offset )]
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A_,unk_token=A_,mask_token=A_,pad_token=A_,mask_token_sent=A_,offset=A_,additional_special_tokens=A_,sp_model_kwargs=self.sp_model_kwargs,**A_,)
__UpperCamelCase = mask_token_sent
__UpperCamelCase = vocab_file
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
# add special tokens to encoder dict
__UpperCamelCase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1,self.offset - 1 )} )
__UpperCamelCase = {v: k for k, v in self.encoder.items()}
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return len(self.sp_model ) + self.offset
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self: Dict,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self,'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self: Optional[int],A_: str ):
'''simple docstring'''
return self.sp_model.encode(A_,out_type=A_ )
def snake_case_ ( self: str,A_: str ):
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__UpperCamelCase = self.sp_model.piece_to_id(A_ )
return sp_id + self.offset
def snake_case_ ( self: Union[str, Any],A_: int ):
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__UpperCamelCase = self.sp_model.IdToPiece(index - self.offset )
return token
def snake_case_ ( self: Union[str, Any],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def snake_case_ ( self: Dict,A_: Any=False ):
'''simple docstring'''
return 1
def snake_case_ ( self: Union[str, Any],A_: Dict ):
'''simple docstring'''
__UpperCamelCase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case_ ( self: Optional[Any],A_: List,A_: Optional[List] = None,A_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(A_ )
elif token_ids_a is None:
return self._special_token_mask(A_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case_ ( self: Optional[Any],A_: Optional[int],A_: Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case_ ( self: List[str],A_: str,A_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCamelCase = os.path.join(
A_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_,'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase (_a ):
@slow
@require_torch
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
__UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase = bertabert.config.encoder.vocab_size
__UpperCamelCase = tokenizer.sep_token_id
__UpperCamelCase = tokenizer.cls_token_id
__UpperCamelCase = 128
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
__UpperCamelCase = train_dataset.select(range(32 ) )
__UpperCamelCase = val_dataset.select(range(16 ) )
__UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 )
__UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 )
__UpperCamelCase = inputs.input_ids
__UpperCamelCase = inputs.attention_mask
__UpperCamelCase = outputs.input_ids
__UpperCamelCase = outputs.input_ids.copy()
__UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__UpperCamelCase = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_: str ):
__UpperCamelCase = pred.label_ids
__UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
__UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,)
# instantiate trainer
__UpperCamelCase = SeqaSeqTrainer(
model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,)
# start training
trainer.train()
| 1 | 1 |
from functools import reduce
__snake_case = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def _A ( _lowercase = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _lowercase , _lowercase : str(int(_lowercase ) * int(_lowercase ) ) , n[i : i + 13] ) )
for i in range(len(_lowercase ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCamelCase :
def __init__( self: Tuple,A_: List[str],A_: List[Any]=2,A_: Union[str, Any]=True,A_: Dict=False,A_: Union[str, Any]=10,A_: Any=3,A_: Any=32 * 4,A_: int=32 * 6,A_: Optional[int]=4,A_: List[str]=32,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = is_training
__UpperCamelCase = use_auxiliary_loss
__UpperCamelCase = num_queries
__UpperCamelCase = num_channels
__UpperCamelCase = min_size
__UpperCamelCase = max_size
__UpperCamelCase = num_labels
__UpperCamelCase = mask_feature_size
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
__UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size],device=A_ )
__UpperCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size],device=A_ ) > 0.5
).float()
__UpperCamelCase = (torch.rand((self.batch_size, self.num_labels),device=A_ ) > 0.5).long()
__UpperCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1],),decoder_config=DetrConfig(
decoder_ffn_dim=128,num_queries=self.num_queries,decoder_attention_heads=2,d_model=self.mask_feature_size,),mask_feature_size=self.mask_feature_size,fpn_feature_size=self.mask_feature_size,num_channels=self.num_channels,num_labels=self.num_labels,)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def snake_case_ ( self: int,A_: str,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = output.encoder_hidden_states
__UpperCamelCase = output.pixel_decoder_hidden_states
__UpperCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ),len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ),len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ),config.decoder_config.decoder_layers )
def snake_case_ ( self: List[Any],A_: Dict,A_: Any,A_: Optional[int],A_: List[Any]=False ):
'''simple docstring'''
with torch.no_grad():
__UpperCamelCase = MaskFormerModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(pixel_values=A_,pixel_mask=A_ )
__UpperCamelCase = model(A_,output_hidden_states=A_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape,(self.batch_size, self.num_queries, self.mask_feature_size),)
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_,A_ )
def snake_case_ ( self: List[str],A_: str,A_: List[Any],A_: List[str],A_: str,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = MaskFormerForInstanceSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_: Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCamelCase = model(pixel_values=A_,pixel_mask=A_ )
__UpperCamelCase = model(A_ )
comm_check_on_output(A_ )
__UpperCamelCase = model(
pixel_values=A_,pixel_mask=A_,mask_labels=A_,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape,torch.Size([1] ) )
@require_torch
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowercase = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = MaskFormerModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,has_text_modality=A_ )
def snake_case_ ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(A_,**A_,output_hidden_states=A_ )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def snake_case_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self: Any ):
'''simple docstring'''
pass
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(A_ )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1],A_ )
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
__UpperCamelCase = MaskFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = (self.model_tester.min_size,) * 2
__UpperCamelCase = {
'pixel_values': torch.randn((2, 3, *size),device=A_ ),
'mask_labels': torch.randn((2, 10, *size),device=A_ ),
'class_labels': torch.zeros(2,10,device=A_ ).long(),
}
__UpperCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(A_ )
__UpperCamelCase = model(**A_ )
self.assertTrue(outputs.loss is not None )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(A_,**A_,output_hidden_states=A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(A_ ).to(A_ )
__UpperCamelCase = model(**A_,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def snake_case_ ( self: Any ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
__UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
__UpperCamelCase = model(A_,mask_labels=A_,class_labels=A_ ).loss
loss.backward()
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.all_model_classes[1]
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
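        # the two assignments below stand for enabling output_hidden_states and output_attentions on the config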
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
__UpperCamelCase = model(A_,mask_labels=A_,class_labels=A_ )
__UpperCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__snake_case = 1e-4
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCamelCase (unittest.TestCase ):
@cached_property
def snake_case_ ( self: Dict ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(A_ )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(A_,return_tensors='pt' ).to(A_ )
__UpperCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_,(1, 3, 800, 1088) )
with torch.no_grad():
__UpperCamelCase = model(**A_ )
__UpperCamelCase = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3],A_,atol=A_ ) )
__UpperCamelCase = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3],A_,atol=A_ ) )
__UpperCamelCase = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3],A_,atol=A_ ) )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(A_ )
.eval()
)
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(A_,return_tensors='pt' ).to(A_ )
__UpperCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_,(1, 3, 800, 1088) )
with torch.no_grad():
__UpperCamelCase = model(**A_ )
# masks_queries_logits
__UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),)
__UpperCamelCase = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
__UpperCamelCase = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3],A_,atol=A_ ) )
# class_queries_logits
__UpperCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__UpperCamelCase = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3],A_,atol=A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(A_ )
.eval()
)
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(A_,return_tensors='pt' ).to(A_ )
__UpperCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_,(1, 3, 800, 1088) )
with torch.no_grad():
__UpperCamelCase = model(**A_ )
# masks_queries_logits
__UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),)
__UpperCamelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
__UpperCamelCase = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3],A_,atol=A_ ) )
# class_queries_logits
__UpperCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__UpperCamelCase = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3],A_,atol=A_ ) )
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(A_ )
.eval()
)
__UpperCamelCase = self.default_image_processor
        __UpperCamelCase = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )],
            segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )],
            return_tensors='pt',)
__UpperCamelCase = inputs['pixel_values'].to(A_ )
__UpperCamelCase = [el.to(A_ ) for el in inputs['mask_labels']]
__UpperCamelCase = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
__UpperCamelCase = model(**A_ )
self.assertTrue(outputs.loss is not None )
| 1 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
    return sorted(_lowercase , key=lambda _lowercase : _lowercase[column] )
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> List[Any]:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> Tuple:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , _lowercase ):
        for j in range(max(0 , i - 6 ) , i ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(_lowercase , _lowercase )
# recursion
__UpperCamelCase = points_counts // 2
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[:mid] , _lowercase )
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCamelCase = min(_lowercase , _lowercase )
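    # gather the points lying within closest_pair_dis of the vertical dividing line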
__UpperCamelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowercase )
__UpperCamelCase = dis_between_closest_in_strip(
_lowercase , len(_lowercase ) , _lowercase )
return min(_lowercase , _lowercase )
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = column_based_sort(_lowercase , column=0 )
__UpperCamelCase = column_based_sort(_lowercase , column=1 )
return (
closest_pair_of_points_sqr(
_lowercase , _lowercase , _lowercase )
) ** 0.5
if __name__ == "__main__":
__snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 1 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase (_a , _a ):
@register_to_config
def __init__( self: str,A_: bool,A_: Optional[int] = None,A_: Optional[int] = None ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__UpperCamelCase = torch.zeros(A_,A_ )
else:
__UpperCamelCase = None
__UpperCamelCase = torch.nn.Parameter(A_ )
class __lowerCamelCase (_a ):
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
def __init__( self: Any,A_: VQModel,A_: CLIPTextModel,A_: CLIPTokenizer,A_: TransformeraDModel,A_: VQDiffusionScheduler,A_: LearnedClassifierFreeSamplingEmbeddings,):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A_,transformer=A_,text_encoder=A_,tokenizer=A_,scheduler=A_,learned_classifier_free_sampling_embeddings=A_,)
def snake_case_ ( self: Dict,A_: Optional[Any],A_: Any,A_: str ):
'''simple docstring'''
__UpperCamelCase = len(A_ ) if isinstance(A_,A_ ) else 1
# get prompt text embeddings
__UpperCamelCase = self.tokenizer(
A_,padding='max_length',max_length=self.tokenizer.model_max_length,return_tensors='pt',)
__UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1,keepdim=A_ )
# duplicate text embeddings for each generation per prompt
__UpperCamelCase = prompt_embeds.repeat_interleave(A_,dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_,1,1 )
else:
__UpperCamelCase = [''] * batch_size
__UpperCamelCase = text_input_ids.shape[-1]
__UpperCamelCase = self.tokenizer(
A_,padding='max_length',max_length=A_,truncation=A_,return_tensors='pt',)
__UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1,keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__UpperCamelCase = negative_prompt_embeds.shape[1]
__UpperCamelCase = negative_prompt_embeds.repeat(1,A_,1 )
__UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt,A_,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: List[str],A_: Union[str, List[str]],A_: int = 100,A_: float = 5.0,A_: float = 1.0,A_: int = 1,A_: Optional[Union[torch.Generator, List[torch.Generator]]] = None,A_: Optional[torch.FloatTensor] = None,A_: Optional[str] = "pil",A_: bool = True,A_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,A_: int = 1,):
'''simple docstring'''
if isinstance(A_,A_ ):
__UpperCamelCase = 1
elif isinstance(A_,A_ ):
__UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
__UpperCamelCase = batch_size * num_images_per_prompt
__UpperCamelCase = guidance_scale > 1.0
__UpperCamelCase = self._encode_prompt(A_,A_,A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_,A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
        # get the initial completely masked latents unless the user supplied them
__UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__UpperCamelCase = self.transformer.num_vector_embeds - 1
__UpperCamelCase = torch.full(A_,A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
__UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_,device=self.device )
__UpperCamelCase = self.scheduler.timesteps.to(self.device )
__UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
__UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__UpperCamelCase = self.transformer(A_,encoder_hidden_states=A_,timestep=A_ ).sample
if do_classifier_free_guidance:
__UpperCamelCase, __UpperCamelCase = model_output.chunk(2 )
__UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
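                # renormalize so the guided scores are valid log-probabilities again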
model_output -= torch.logsumexp(A_,dim=1,keepdim=A_ )
__UpperCamelCase = self.truncate(A_,A_ )
# remove `log(0)`'s (`-inf`s)
__UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__UpperCamelCase = self.scheduler.step(A_,timestep=A_,sample=A_,generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_,A_,A_ )
__UpperCamelCase = self.vqvae.config.vq_embed_dim
__UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_,shape=A_ )
__UpperCamelCase = self.vqvae.decode(A_,force_not_quantize=A_ ).sample
__UpperCamelCase = (image / 2 + 0.5).clamp(0,1 )
__UpperCamelCase = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def snake_case_ ( self: int,A_: torch.FloatTensor,A_: float ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = torch.sort(A_,1,descending=A_ )
__UpperCamelCase = torch.exp(A_ )
__UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :],A_ )
__UpperCamelCase = torch.cat((all_true, keep_mask),dim=1 )
__UpperCamelCase = keep_mask[:, :-1, :]
__UpperCamelCase = keep_mask.gather(1,indices.argsort(1 ) )
__UpperCamelCase = log_p_x_0.clone()
__UpperCamelCase = -torch.inf # -inf = log(0)
return rv
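# A minimal usage sketch (the checkpoint name below is assumed, not part of this file):
#   pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
#   image = pipe('a teddy bear playing in the pool', num_inference_steps=100).images[0]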
| 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCamelCase (_a ):
_lowercase = """bert"""
def __init__( self: Any,A_: Dict=3_0522,A_: Optional[Any]=768,A_: Union[str, Any]=12,A_: List[Any]=12,A_: Optional[int]=3072,A_: Union[str, Any]="gelu",A_: List[str]=0.1,A_: Dict=0.1,A_: Optional[int]=512,A_: Optional[Any]=2,A_: Union[str, Any]=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=0,A_: List[Any]="absolute",A_: str=True,A_: Union[str, Any]=None,**A_: int,):
'''simple docstring'''
super().__init__(pad_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 1 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__snake_case = get_tests_dir('''fixtures/dummy-config.json''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 0
def snake_case_ ( self: str ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = AutoConfig.for_model('roberta' )
self.assertIsInstance(A_,A_ )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__UpperCamelCase = os.path.join(A_,'fake-roberta' )
os.makedirs(A_,exist_ok=A_ )
with open(os.path.join(A_,'config.json' ),'w' ) as f:
f.write(json.dumps({} ) )
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertEqual(type(A_ ),A_ )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
try:
AutoConfig.register('custom',A_ )
# Wrong model type will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('model',A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('bert',A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
__UpperCamelCase = AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_,A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def snake_case_ ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
A_,'bert-base is not a local folder and is not a valid model identifier' ):
__UpperCamelCase = AutoConfig.from_pretrained('bert-base' )
def snake_case_ ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
A_,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__UpperCamelCase = AutoConfig.from_pretrained(A_,revision='aaaaaa' )
def snake_case_ ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
A_,'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.',):
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
with self.assertRaises(A_ ):
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__,'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
__UpperCamelCase = AutoConfig.from_pretrained(A_,trust_remote_code=A_ )
self.assertEqual(reloaded_config.__class__.__name__,'NewModelConfig' )
def snake_case_ ( self: str ):
'''simple docstring'''
class __lowerCamelCase (_a ):
_lowercase = """new-model"""
try:
AutoConfig.register('new-model',A_ )
# If remote code is not set, the default is to use local
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__,'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__,'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
__UpperCamelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model',trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__,'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
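    # interpret the title as a bijective base-26 number with A=1 ... Z=26,
    # e.g. 'AB' -> 1 * 26 + 2 = 28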
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
        __UpperCamelCase = (ord(column_title[index] ) - 64) * pow(26 , power )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
__UpperCamelCase = hex_num[0] == '-'
if is_negative:
__UpperCamelCase = hex_num[1:]
try:
__UpperCamelCase = int(_lowercase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
__UpperCamelCase = ''
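    # repeated division by 2: prepend each remainder so the most significant bit ends up first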
while int_num > 0:
__UpperCamelCase = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
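        # the original checkpoint stores no k bias, so zeros are spliced between q and v
        # to form the fused qkv bias expected by the HF implementation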
__UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) )
__UpperCamelCase = qkv_bias
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = 3_64 if 'coco' in model_name else 2_24
__UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict()
elif "opt-6.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict()
elif "t5-xl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase )
return config, image_size
@torch.no_grad()
def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0]
__UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase )
__UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval()
__UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase )
assert len(_lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCamelCase = load_demo_image()
__UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
__UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase )
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
__UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase )
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowercase , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "opt" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__UpperCamelCase = hf_model(_lowercase , _lowercase ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase )
else:
# cast to same type
__UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__UpperCamelCase = ''
__UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase )
__UpperCamelCase = original_model.generate({'image': original_pixel_values} )
__UpperCamelCase = hf_model.generate(
_lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowercase )
__UpperCamelCase = input_ids.shape[1]
__UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """CLIPImageProcessor"""
_lowercase = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self: List[str],A_: int=None,A_: str=None,**A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: Tuple,A_: Any=None,A_: str=None,A_: List[Any]=None,**A_: Tuple ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__UpperCamelCase = self.tokenizer(A_,return_tensors=A_,**A_ )
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: int,*A_: Dict,**A_: int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Union[str, Any],**A_: str ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.tokenizer.model_input_names
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__UpperCamelCase = import_module('tasks' )
try:
__UpperCamelCase = getattr(_lowercase , model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
__UpperCamelCase = token_classification_task.get_labels(data_args.labels )
__UpperCamelCase = dict(enumerate(_lowercase ) )
__UpperCamelCase = len(_lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__UpperCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_lowercase , _lowercase ) -> Tuple[List[int], List[int]]:
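        # argmax over the label dimension, then drop positions carrying the ignore index
        # (padding and sub-word continuation tokens) before computing seqeval metrics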
__UpperCamelCase = np.argmax(_lowercase , axis=2 )
__UpperCamelCase, __UpperCamelCase = preds.shape
__UpperCamelCase = [[] for _ in range(_lowercase )]
__UpperCamelCase = [[] for _ in range(_lowercase )]
for i in range(_lowercase ):
for j in range(_lowercase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase, __UpperCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowercase , _lowercase ),
"precision": precision_score(_lowercase , _lowercase ),
"recall": recall_score(_lowercase , _lowercase ),
"f1": fa_score(_lowercase , _lowercase ),
}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
# Predict
if training_args.do_predict:
__UpperCamelCase = TokenClassificationDataset(
token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = trainer.predict(_lowercase )
__UpperCamelCase, __UpperCamelCase = align_predictions(_lowercase , _lowercase )
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__UpperCamelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_lowercase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase )
return results
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 1 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """AutoImageProcessor"""
_lowercase = """AutoTokenizer"""
def __init__( self: int,A_: Any,A_: Tuple ):
'''simple docstring'''
super().__init__(A_,A_ )
__UpperCamelCase = self.image_processor
def __call__( self: str,A_: Union[str, Any]=None,A_: Optional[int]=None,A_: Optional[int]=None,**A_: Union[str, Any] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__UpperCamelCase = self.tokenizer(A_,return_tensors=A_,**A_ )
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Dict,*A_: Optional[Any],**A_: Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Union[str, Any],**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _A ( *_lowercase ) -> Tuple:
"""simple docstring"""
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_lowercase )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
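# Usage sketch: printflock(f"{hostname} rank {local_rank} ready") serializes
# concurrent prints by holding an exclusive flock on this file while printing.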
__snake_case = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__snake_case = torch.device('''cuda''', local_rank)
__snake_case = socket.gethostname()
__snake_case = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case = dist.get_rank()
__snake_case = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__( self: Optional[int],text: str,pattern: str ):
        '''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self: Tuple,char: str ):
        '''simple docstring'''
        for i in range(self.patLen - 1,-1,-1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self: List[str],current_pos: int ):
        '''simple docstring'''
        for i in range(self.patLen - 1,-1,-1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self: Tuple ):
        '''simple docstring'''
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
__snake_case = '''ABAABA'''
__snake_case = '''AB'''
__snake_case = BoyerMooreSearch(text, pattern)
__snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
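# Bad-character heuristic in a nutshell: on a mismatch at text index t, shift the
# pattern so its rightmost occurrence of text[t] lines up with t; when text[t] does
# not occur in the pattern at all (match_in_pattern returns -1), shift fully past t.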
| 1 |
import pytest
import datasets
# Import fixture modules as plugins
__snake_case = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems( config , items ) -> Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def pytest_configure( config ) -> str:
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def _A ( tmp_path_factory , monkeypatch ) -> Any:
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def _A ( ) -> Dict:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def _A ( monkeypatch ) -> Tuple:
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def _A ( monkeypatch ) -> Any:
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
| 1 | 1 |
import unittest
import numpy as np
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ) -> np.ndarray:
    """simple docstring"""
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.' )
    return mat_c - mat_b.T @ a_inv @ mat_b
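# For the block matrix [[A, B], [B.T, C]], the Schur complement of A is
# S = C - B.T @ inv(A) @ B, and det([[A, B], [B.T, C]]) = det(A) * det(S);
# the first test below verifies exactly this determinant identity.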
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__UpperCamelCase = np.array([[0, 3], [3, 0], [2, 3]] )
__UpperCamelCase = np.array([[2, 1], [6, 3]] )
__UpperCamelCase = schur_complement(A_,A_,A_ )
__UpperCamelCase = np.block([[a, b], [b.T, c]] )
__UpperCamelCase = np.linalg.det(A_ )
__UpperCamelCase = np.linalg.det(A_ )
__UpperCamelCase = np.linalg.det(A_ )
self.assertAlmostEqual(A_,det_a * det_s )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__UpperCamelCase = np.array([[0, 3], [3, 0], [2, 3]] )
__UpperCamelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(A_ ):
schur_complement(A_,A_,A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__UpperCamelCase = np.array([[0, 3], [3, 0], [2, 3]] )
__UpperCamelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(A_ ):
schur_complement(A_,A_,A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = VideoToVideoSDPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
_lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase = False
# No `output_type`.
_lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,beta_schedule='scaled_linear',clip_sample=A_,set_alpha_to_one=A_,)
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
__UpperCamelCase = CLIPTextModel(A_ )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case_ ( self: Union[str, Any],A_: Any,A_: Any=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = VideoToVideoSDPipeline(**A_ )
__UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = self.get_dummy_inputs(A_ )
__UpperCamelCase = 'np'
__UpperCamelCase = sd_pipe(**A_ ).frames
__UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case_ ( self: Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
def snake_case_ ( self: Any ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase = torch.randn((1, 10, 3, 1024, 576),generator=A_ )
__UpperCamelCase = video.to('cuda' )
__UpperCamelCase = 'Spiderman is surfing'
__UpperCamelCase = pipe(A_,video=A_,generator=A_,num_inference_steps=3,output_type='pt' ).frames
__UpperCamelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase=2 , _lowercase=3 , _lowercase=16 , _lowercase = 10 , _lowercase = 2 ) -> Any:
"""simple docstring"""
def get_dataset(_lowercase ):
__UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCamelCase = get_dataset(_lowercase )
__UpperCamelCase = get_dataset(_lowercase )
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
return (train_dataloader, valid_dataloader)
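# The loaders above yield (x, a * x + b + noise) pairs, so the scalar DummyModel
# defined below can be fit by train() in a handful of optimizer steps.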
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = []
for epoch in range(_lowercase ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCamelCase, __UpperCamelCase = batch
__UpperCamelCase = model(_lowercase )
__UpperCamelCase = torch.nn.functional.mse_loss(_lowercase , _lowercase )
accelerator.backward(_lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __lowerCamelCase (nn.Module ):
def __init__( self: Optional[Any] ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def snake_case_ ( self: Tuple,A_: Tuple ):
'''simple docstring'''
return x * self.a + self.b
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1,project_dir=A_,automatic_checkpoint_naming=A_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ),1 )
def snake_case_ ( self: Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
# Save initial
__UpperCamelCase = os.path.join(A_,'initial' )
accelerator.save_state(A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
accelerator.load_state(A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
__UpperCamelCase = train(2,A_,A_,A_,A_ )
# Save everything
__UpperCamelCase = os.path.join(A_,'checkpoint' )
accelerator.save_state(A_ )
# Load everything back in and make sure all states work
accelerator.load_state(A_ )
test_rands += train(1,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=A_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1,automatic_checkpoint_naming=A_ )
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
accelerator.load_state(os.path.join(A_,'checkpoints','checkpoint_0' ) )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
__UpperCamelCase = train(2,A_,A_,A_,A_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A_,'checkpoints','checkpoint_1' ) )
test_rands += train(1,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(A_ ) as ve:
accelerator.register_for_checkpointing(A_,A_,A_,A_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(A_,step_size=1,gamma=0.9_9 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=A_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_,A_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3,A_,A_,A_,A_,A_ )
self.assertNotEqual(A_,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A_,'checkpoints','checkpoint_0' ) )
self.assertEqual(A_,scheduler.state_dict() )
def snake_case_ ( self: str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=A_,total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase = accelerator.prepare(A_ )
        # Save 11 states; with total_limit=2 only the two most recent checkpoints should survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A_,'checkpoints','checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(A_,'checkpoints','checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(A_,'checkpoints','checkpoint_10' ) ) )
@require_cuda
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = '''/tmp/accelerate/state_checkpointing'''
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__snake_case , __snake_case = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__snake_case = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__snake_case = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__snake_case = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__snake_case = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__snake_case = parser.parse_args()
    __snake_case = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
__snake_case = CLIPImageProcessor()
__snake_case = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__snake_case = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 1 | 1 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __lowerCamelCase :
def __init__( self: Any ):
'''simple docstring'''
__UpperCamelCase = ''
__UpperCamelCase = ''
__UpperCamelCase = []
__UpperCamelCase = 0
__UpperCamelCase = 256
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
def snake_case_ ( self: List[Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = cva.imread(A_,0 )
__UpperCamelCase = copy.deepcopy(self.img )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = plt.hist(self.img.ravel(),256,[0, 256],label='x' )
__UpperCamelCase = np.sum(A_ )
for i in range(len(A_ ) ):
__UpperCamelCase = x[i] / self.k
self.sk += prk
__UpperCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
__UpperCamelCase = int(last % last )
__UpperCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A_ )
__UpperCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
__UpperCamelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__UpperCamelCase = self.img[j][i]
if num != self.last_list[num]:
__UpperCamelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg',self.img )
def snake_case_ ( self: Dict ):
'''simple docstring'''
plt.hist(self.img.ravel(),256,[0, 256] )
def snake_case_ ( self: str ):
'''simple docstring'''
cva.imshow('Output-Image',self.img )
cva.imshow('Input-Image',self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    __snake_case = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
__snake_case = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
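    # With _LazyModule, importing this package stays cheap: the torch-backed
    # classes declared in _import_structure are only imported on first attribute access.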
| 1 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__snake_case , __snake_case , __snake_case = False, False, False
@dataclass
class __lowerCamelCase :
_lowercase = None
_lowercase = True
_lowercase = True
_lowercase = None
# Automatically constructed
_lowercase = "dict"
_lowercase = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
_lowercase = field(default="""Audio""" , init=_a , repr=_a )
def __call__( self: Optional[int] ):
'''simple docstring'''
return self.pa_type
def snake_case_ ( self: Tuple,A_: Union[str, bytes, dict] ):
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(A_,A_ ):
return {"bytes": None, "path": value}
elif isinstance(A_,A_ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__UpperCamelCase = BytesIO()
sf.write(A_,value['array'],value['sampling_rate'],format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
                    # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
                    # If the PCM bytes are already in memory, there is no need to re-read the file (just use them!)
__UpperCamelCase = np.frombuffer(value['bytes'],dtype=np.intaa ).astype(np.floataa ) / 3_2767
else:
__UpperCamelCase = np.memmap(value['path'],dtype='h',mode='r' ).astype(np.floataa ) / 3_2767
__UpperCamelCase = BytesIO(bytes() )
sf.write(A_,A_,value['sampling_rate'],format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case_ ( self: List[Any],A_: dict,A_: Optional[Dict[str, Union[str, bool, None]]] = None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
__UpperCamelCase, __UpperCamelCase = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
__UpperCamelCase = xsplitext(A_ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
__UpperCamelCase = token_per_repo_id or {}
__UpperCamelCase = path.split('::' )[-1]
try:
__UpperCamelCase = string_to_dict(A_,config.HUB_DATASETS_URL )['repo_id']
__UpperCamelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__UpperCamelCase = None
with xopen(A_,'rb',use_auth_token=A_ ) as f:
__UpperCamelCase, __UpperCamelCase = sf.read(A_ )
else:
__UpperCamelCase, __UpperCamelCase = sf.read(A_ )
__UpperCamelCase = array.T
if self.mono:
__UpperCamelCase = librosa.to_mono(A_ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__UpperCamelCase = librosa.resample(A_,orig_sr=A_,target_sr=self.sampling_rate )
__UpperCamelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def snake_case_ ( self: str,A_: Union[pa.StringArray, pa.StructArray] ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
__UpperCamelCase = pa.array([None] * len(A_ ),type=pa.binary() )
__UpperCamelCase = pa.StructArray.from_arrays([bytes_array, storage],['bytes', 'path'],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__UpperCamelCase = pa.array([None] * len(A_ ),type=pa.string() )
__UpperCamelCase = pa.StructArray.from_arrays([storage, path_array],['bytes', 'path'],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
__UpperCamelCase = pa.array([Audio().encode_example(A_ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
__UpperCamelCase = storage.field('bytes' )
else:
__UpperCamelCase = pa.array([None] * len(A_ ),type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
__UpperCamelCase = storage.field('path' )
else:
__UpperCamelCase = pa.array([None] * len(A_ ),type=pa.string() )
__UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array],['bytes', 'path'],mask=storage.is_null() )
return array_cast(A_,self.pa_type )
def snake_case_ ( self: Optional[Any],A_: pa.StructArray ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(A_: Union[str, Any] ):
with xopen(A_,'rb' ) as f:
__UpperCamelCase = f.read()
return bytes_
__UpperCamelCase = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
__UpperCamelCase = pa.array(
[os.path.basename(A_ ) if path is not None else None for path in storage.field('path' ).to_pylist()],type=pa.string(),)
__UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array],['bytes', 'path'],mask=bytes_array.is_null() )
return array_cast(A_,self.pa_type )
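# Rough usage sketch (method names as in the upstream `datasets` API, which this
# file obscures): Audio().encode_example({"array": arr, "sampling_rate": 16_000})
# stores WAV bytes, while decode_example(...) returns {"path", "array",
# "sampling_rate"} with the mono/resampling options above applied.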
| 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__snake_case = '''src/diffusers'''
# Matches is_xxx_available()
__snake_case = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
__snake_case = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
__snake_case = '''
{0} = None
'''
__snake_case = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
__snake_case = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = _re_backend.findall(_lowercase )
if len(_lowercase ) == 0:
return None
return "_and_".join(_lowercase )
def _A ( ) -> Tuple:
"""simple docstring"""
with open(os.path.join(_lowercase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.readlines()
# Get to the point we do the actual imports for type checking
__UpperCamelCase = 0
__UpperCamelCase = {}
# Go through the end of the file
while line_index < len(_lowercase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__UpperCamelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
__UpperCamelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(_lowercase ) and len(lines[line_index] ) > 1:
__UpperCamelCase = lines[line_index]
__UpperCamelCase = _re_single_line_import.search(_lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_lowercase ) > 0:
__UpperCamelCase = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object( name , backend_name ) -> Union[str, Any]:
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
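# e.g. create_dummy_object('UNet2DModel', '["torch"]') fills in the DUMMY_CLASS
# template above, producing a placeholder class that raises unless torch is installed.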
def _A ( _lowercase=None ) -> Optional[Any]:
"""simple docstring"""
if backend_specific_objects is None:
__UpperCamelCase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__UpperCamelCase = {}
for backend, objects in backend_specific_objects.items():
__UpperCamelCase = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
__UpperCamelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_lowercase , _lowercase ) for o in objects] )
__UpperCamelCase = dummy_file
return dummy_files
def _A ( _lowercase=False ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__UpperCamelCase = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
__UpperCamelCase = os.path.join(_lowercase , 'utils' )
__UpperCamelCase = {
backend: os.path.join(_lowercase , f'''dummy_{short_names.get(_lowercase , _lowercase )}_objects.py''' )
for backend in dummy_files.keys()
}
__UpperCamelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_lowercase ):
with open(_lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase = f.read()
else:
__UpperCamelCase = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py as the main '''
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f'''diffusers.utils.dummy_{short_names.get(_lowercase , _lowercase )}_objects.py. Run `make fix-copies` '''
'to fix this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__snake_case = Mapping[str, np.ndarray]
__snake_case = Mapping[str, Any] # Is a nested dict.
__snake_case = 0.01
@dataclasses.dataclass(frozen=_a )
class __lowerCamelCase :
_lowercase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_lowercase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_lowercase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_lowercase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_lowercase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_lowercase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_lowercase = None
# Templates used to generate this protein (prediction-only)
_lowercase = None
# Chain corresponding to each parent
_lowercase = None
def _A ( _lowercase ) -> Protein:
"""simple docstring"""
__UpperCamelCase = r'(\[[A-Z]+\]\n)'
__UpperCamelCase = [tag.strip() for tag in re.split(_lowercase , _lowercase ) if len(_lowercase ) > 0]
__UpperCamelCase = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
__UpperCamelCase = ["N", "CA", "C"]
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
for g in groups:
if "[PRIMARY]" == g[0]:
__UpperCamelCase = g[1][0].strip()
for i in range(len(_lowercase ) ):
if seq[i] not in residue_constants.restypes:
__UpperCamelCase = 'X' # FIXME: strings are immutable
__UpperCamelCase = np.array(
[residue_constants.restype_order.get(_lowercase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__UpperCamelCase = []
for axis in range(3 ):
tertiary.append(list(map(_lowercase , g[1][axis].split() ) ) )
__UpperCamelCase = np.array(_lowercase )
__UpperCamelCase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_lowercase ):
__UpperCamelCase = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__UpperCamelCase = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
__UpperCamelCase = np.zeros(
(
len(_lowercase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_lowercase ):
__UpperCamelCase = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_lowercase , atom_mask=_lowercase , aatype=_lowercase , residue_index=np.arange(len(_lowercase ) ) , b_factors=_lowercase , )
def _A ( _lowercase , _lowercase = 0 ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = prot.remark
if remark is not None:
pdb_headers.append(f'''REMARK {remark}''' )
__UpperCamelCase = prot.parents
__UpperCamelCase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__UpperCamelCase = [p for i, p in zip(_lowercase , _lowercase ) if i == chain_id]
if parents is None or len(_lowercase ) == 0:
__UpperCamelCase = ['N/A']
pdb_headers.append(f'''PARENT {' '.join(_lowercase )}''' )
return pdb_headers
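# e.g. a protein with remark "relaxed" and parents ["1ABC_A"] yields the header
# lines "REMARK relaxed" and "PARENT 1ABC_A".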
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = pdb_str.split('\n' )
__UpperCamelCase = prot.remark
if remark is not None:
out_pdb_lines.append(f'''REMARK {remark}''' )
__UpperCamelCase = 42
if prot.parents is not None and len(prot.parents ) > 0:
__UpperCamelCase = []
if prot.parents_chain_index is not None:
__UpperCamelCase = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowercase ) , [] )
parent_dict[str(_lowercase )].append(_lowercase )
__UpperCamelCase = max([int(_lowercase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__UpperCamelCase = parent_dict.get(str(_lowercase ) , ['N/A'] )
parents_per_chain.append(_lowercase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__UpperCamelCase = [['N/A']]
def make_parent_line(_lowercase ) -> str:
return f'''PARENT {' '.join(_lowercase )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__UpperCamelCase = 0
for i, l in enumerate(_lowercase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowercase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowercase ):
__UpperCamelCase = parents_per_chain[chain_counter]
else:
__UpperCamelCase = ['N/A']
out_pdb_lines.append(make_parent_line(_lowercase ) )
return "\n".join(_lowercase )
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = residue_constants.restypes + ['X']
def res_atoa(_lowercase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
__UpperCamelCase = residue_constants.atom_types
__UpperCamelCase = []
__UpperCamelCase = prot.atom_mask
__UpperCamelCase = prot.aatype
__UpperCamelCase = prot.atom_positions
__UpperCamelCase = prot.residue_index.astype(np.intaa )
__UpperCamelCase = prot.b_factors
__UpperCamelCase = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
__UpperCamelCase = get_pdb_headers(_lowercase )
if len(_lowercase ) > 0:
pdb_lines.extend(_lowercase )
__UpperCamelCase = aatype.shape[0]
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = string.ascii_uppercase
__UpperCamelCase = None
# Add all atom sites.
for i in range(_lowercase ):
__UpperCamelCase = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__UpperCamelCase = 'ATOM'
__UpperCamelCase = atom_name if len(_lowercase ) == 4 else f''' {atom_name}'''
__UpperCamelCase = ''
__UpperCamelCase = ''
__UpperCamelCase = 1.00
__UpperCamelCase = atom_name[0] # Protein supports only C, N, O, S, this works.
__UpperCamelCase = ''
__UpperCamelCase = 'A'
if chain_index is not None:
__UpperCamelCase = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__UpperCamelCase = (
f'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
f'''{res_name_a:>3} {chain_tag:>1}'''
f'''{residue_index[i]:>4}{insertion_code:>1} '''
f'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
f'''{occupancy:>6.2f}{b_factor:>6.2f} '''
f'''{element:>2}{charge:>2}'''
)
pdb_lines.append(_lowercase )
atom_index += 1
__UpperCamelCase = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__UpperCamelCase = True
__UpperCamelCase = chain_index[i + 1]
if should_terminate:
# Close the chain.
__UpperCamelCase = 'TER'
__UpperCamelCase = (
f'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(_lowercase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowercase , _lowercase ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(_lowercase )
def _A ( _lowercase ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _A ( _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_lowercase , remark=_lowercase , parents=_lowercase , parents_chain_index=_lowercase , )
| 1 |
import string
def decrypt( message: str ) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'''Decryption using Key #{key}: {translated}''' )
def main() -> None:
    """simple docstring"""
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
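# e.g. for the ciphertext "WKLV" the loop prints, among its 26 candidates,
# "Decryption using Key #3: THIS".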
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 1 | 1 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = TapasConfig.from_json_file(snake_case )
# set absolute/relative position embeddings parameter
__magic_name__ :Optional[int] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__magic_name__ :Any = TapasForQuestionAnswering(config=snake_case )
elif task == "WTQ":
# run_task_main.py hparams
__magic_name__ :Any = 4
__magic_name__ :int = True
# hparam_utils.py hparams
__magic_name__ :int = 0.66_4694
__magic_name__ :str = 0.20_7951
__magic_name__ :List[Any] = 0.12_1194
__magic_name__ :List[Any] = True
__magic_name__ :int = True
__magic_name__ :Tuple = False
__magic_name__ :Tuple = 0.035_2513
__magic_name__ :str = TapasForQuestionAnswering(config=snake_case )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__magic_name__ :Optional[Any] = 4
__magic_name__ :Union[str, Any] = False
# hparam_utils.py hparams
__magic_name__ :Optional[int] = 36.4519
__magic_name__ :Dict = 0.90_3421
__magic_name__ :Any = 222.088
__magic_name__ :str = True
__magic_name__ :int = True
__magic_name__ :int = True
__magic_name__ :List[str] = 0.76_3141
__magic_name__ :Optional[Any] = TapasForQuestionAnswering(config=snake_case )
elif task == "TABFACT":
__magic_name__ :str = TapasForSequenceClassification(config=snake_case )
elif task == "MLM":
__magic_name__ :Any = TapasForMaskedLM(config=snake_case )
elif task == "INTERMEDIATE_PRETRAINING":
__magic_name__ :str = TapasModel(config=snake_case )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(snake_case, snake_case, snake_case )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(snake_case )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
__magic_name__ :List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + '''vocab.txt''', model_max_length=5_1_2 )
tokenizer.save_pretrained(snake_case )
print('''Used relative position embeddings:''', model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = KandinskyInpaintPipeline
_lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowercase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowercase = False
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,)
__UpperCamelCase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0,2,3,1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
__UpperCamelCase = np.ones((64, 64),dtype=np.floataa )
__UpperCamelCase = 0
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ),return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__UpperCamelCase = np.ones((768, 768),dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = 'a hat'
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase, __UpperCamelCase = pipe_prior(
A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple()
__UpperCamelCase = pipeline(
A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',)
__UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_,A_ )
| 1 | 0 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
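# Quick self-checks for the function above (illustrative values; the input
# list must already be sorted for the halving logic to hold).
assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False
assert binary_search([], 1) is False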
if __name__ == "__main__":
UpperCAmelCase_ = input("""Enter numbers separated by comma:\n""").strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")]
UpperCAmelCase_ = int(input("""Enter the number to be found in the list:\n""").strip())
UpperCAmelCase_ = """""" if binary_search(sequence, target) else """not """
print(f'{target} was {not_str}found in {sequence}')
| 2 |
from typing import Any
class Node:
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
'''simple docstring'''
return F'''Node({self.data})'''
class LinkedList:
def __init__( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = None
def __iter__( self: int ):
'''simple docstring'''
__UpperCamelCase = self.head
while node:
yield node.data
__UpperCamelCase = node.next
def __len__( self: List[str] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self: Any ):
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __getitem__( self: int,A_: int ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: int,A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__UpperCamelCase = self.head
for _ in range(A_ ):
__UpperCamelCase = current.next
__UpperCamelCase = data
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
self.insert_nth(len(self ),A_ )
def snake_case_ ( self: List[Any],A_: Any ):
'''simple docstring'''
self.insert_nth(0,A_ )
def snake_case_ ( self: Optional[Any],A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__UpperCamelCase = Node(A_ )
if self.head is None:
__UpperCamelCase = new_node
elif index == 0:
__UpperCamelCase = self.head # link new_node to head
__UpperCamelCase = new_node
else:
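            # walk to the node just before the insertion point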
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = new_node
def snake_case_ ( self: str ): # print every node data
'''simple docstring'''
print(self )
def snake_case_ ( self: int ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case_ ( self: str ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case_ ( self: Any,A_: int = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__UpperCamelCase = self.head # default first node
if index == 0:
__UpperCamelCase = self.head.next
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next.next
return delete_node.data
def snake_case_ ( self: Any ):
'''simple docstring'''
return self.head is None
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = self.head
while current:
# Store the current node's next node.
__UpperCamelCase = current.next
# Make the current node's next point backwards
__UpperCamelCase = prev
# Make the previous node be the current node
__UpperCamelCase = current
# Make the current node the next node (to progress iteration)
__UpperCamelCase = next_node
# Return prev in order to put the head at the end
__UpperCamelCase = prev
def test_singly_linked_list() -> None:
"""simple docstring"""
__UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
"""simple docstring"""
__UpperCamelCase = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main() -> None:
"""simple docstring"""
from doctest import testmod
testmod()
__UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
__UpperCamelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
| 1 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1 , input_2: int = 1 , carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr')
    cr = qiskit.ClassicalRegister(2 , 'cr')

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr , cr)

    for i in range(0 , 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3)  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1)
    quantum_circuit.ccx(1 , 2 , 3)
    quantum_circuit.cx(1 , 2)
    quantum_circuit.cx(0 , 1)
    quantum_circuit.measure([2, 3] , cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit , backend , shots=1000)

    return job.result().get_counts(quantum_circuit)
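# A classical full adder for cross-checking the measured counts above
# (an illustrative helper, not part of the original module): in the circuit,
# qubit 2 carries the sum bit and qubit 3 the carry-out bit.
def classical_full_adder(input_1: int, input_2: int, carry_in: int) -> tuple[int, int]:
    total = input_1 + input_2 + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)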
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 3 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
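# The modeling classes are added to the import structure only when torch is
# available; _LazyModule below defers the actual imports until first attribute access.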
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_unispeech'''] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 0 |
"""simple docstring"""
import os
import sys
import unittest
__UpperCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__UpperCamelCase : Dict = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__UpperCamelCase : int = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = get_test_to_tester_mapping(_snake_case )
lowerCAmelCase = get_test_to_tester_mapping(_snake_case )
lowerCAmelCase = {'BertModelTest': 'BertModelTester'}
lowerCAmelCase = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = get_model_to_test_mapping(_snake_case )
lowerCAmelCase = get_model_to_test_mapping(_snake_case )
lowerCAmelCase = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowerCAmelCase = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = get_model_to_tester_mapping(_snake_case )
lowerCAmelCase = get_model_to_tester_mapping(_snake_case )
lowerCAmelCase = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowerCAmelCase = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
| 4 |
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """simple docstring"""
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces' )
    return encoded
def decode(coded: str) -> str:
    """simple docstring"""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
    decoded = ''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
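# Round-trip check for the two helpers above (illustrative input).
assert decode(encode("hello world")) == "hello world"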
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''data2vec-text'''

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 5 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """simple docstring"""
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32' )

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )

    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = b''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string) , '064b' ).encode('utf-8' )

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa(i: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa(a: int, b: int) -> int:
    """simple docstring"""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]

    # Starting states
    aa = 0X67_45_23_01
    ba = 0Xef_cd_ab_89
    ca = 0X98_ba_dc_fe
    da = 0X10_32_54_76

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )

        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )

    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
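# Cross-check against the standard library (illustrative; both sides yield
# the ASCII-hex MD5 digest as bytes).
import hashlib

assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")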
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self :Dict , __A :Union[str, Any]=None , __A :Union[str, Any]=None , **__A :Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :Dict , __A :List[str] , __A :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __A :Union[List[List[int]], List[List[List[int]]]] = None , __A :Optional[Union[List[int], List[List[int]]]] = None , __A :bool = True , __A :Union[bool, str, PaddingStrategy] = False , __A :Union[bool, str, TruncationStrategy] = None , __A :Optional[int] = None , __A :int = 0 , __A :Optional[int] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = True , __A :Optional[Union[str, TensorType]] = None , **__A :Optional[Any] , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
SCREAMING_SNAKE_CASE__ = self.image_processor(images=__A , return_tensors=__A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
SCREAMING_SNAKE_CASE__ = features["""words"""]
SCREAMING_SNAKE_CASE__ = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel values
SCREAMING_SNAKE_CASE__ = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
SCREAMING_SNAKE_CASE__ = self.get_overflowing_images(__A , encoded_inputs["""overflow_to_sample_mapping"""] )
SCREAMING_SNAKE_CASE__ = images
return encoded_inputs
def _snake_case ( self :str , __A :Optional[int] , __A :int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f''' {len(__A )} and {len(__A )}''' )
return images_with_overflow
def _snake_case ( self :List[Any] , *__A :int , **__A :Optional[int] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :Tuple , *__A :int , **__A :str ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _snake_case ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def _snake_case ( self :Dict ) -> Any:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
        return self.image_processor
| 6 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class Node:
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class AStar:
def __init__( self: Any,A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ )
__UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ )
__UpperCamelCase = [self.start]
__UpperCamelCase = []
__UpperCamelCase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A_ )
self.closed_nodes.append(A_ )
__UpperCamelCase = self.get_successors(A_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A_ )
else:
self.open_nodes.append(A_ )
return [self.start.pos]
def snake_case_ ( self: int,A_: Node ):
'''simple docstring'''
__UpperCamelCase = []
for action in delta:
__UpperCamelCase = parent.pos_x + action[1]
__UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) )
return successors
def snake_case_ ( self: Any,A_: Node | None ):
'''simple docstring'''
__UpperCamelCase = node
__UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCamelCase = current_node.parent
path.reverse()
return path
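# The bidirectional variant below runs two searches at once (start to goal and
# goal to start) and stitches the two partial paths together when the frontiers meet.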
class BidirectionalAStar:
def __init__( self: List[Any],A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCamelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A_,A_ )
self.fwd_astar.closed_nodes.append(A_ )
self.bwd_astar.closed_nodes.append(A_ )
__UpperCamelCase = current_bwd_node
__UpperCamelCase = current_fwd_node
__UpperCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A_ ),
self.bwd_astar: self.bwd_astar.get_successors(A_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A_ )
else:
astar.open_nodes.append(A_ )
return [self.fwd_astar.start.pos]
def snake_case_ ( self: List[str],A_: Node,A_: Node ):
'''simple docstring'''
__UpperCamelCase = self.fwd_astar.retrace_path(A_ )
__UpperCamelCase = self.bwd_astar.retrace_path(A_ )
bwd_path.pop()
bwd_path.reverse()
__UpperCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
    __snake_case = BidirectionalAStar(init, goal)
    __snake_case = bd_a_star.search()
    __snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=100 , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : str=2 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Union[str, Any]=32 , _UpperCAmelCase : str=4 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Union[str, Any]=10 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : int=None , _UpperCAmelCase : Union[str, Any]=[0, 1, 2, 3] , ):
_A = parent
_A = 100
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = scope
_A = out_indices
_A = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A = (image_size // patch_size) ** 2
_A = num_patches + 1
def lowerCAmelCase_ ( self : Dict ):
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase_ ( self : Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ):
_A = BeitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ):
_A = BeitForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Any ):
_A = self.type_sequence_label_size
_A = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A = 1
_A = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict ):
_A = self.num_labels
_A = BeitForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def lowerCAmelCase_ ( self : Tuple ):
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': BeitModel,
            '''image-classification''': BeitForImageClassification,
            '''image-segmentation''': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def lowerCAmelCase_ ( self : List[str] ):
_A = BeitModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def lowerCAmelCase_ ( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Dict ):
pass
def lowerCAmelCase_ ( self : List[str] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any ):
if not self.model_tester.is_training:
return
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
_A = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
_A = model(**_UpperCAmelCase ).loss
loss.backward()
def lowerCAmelCase_ ( self : Optional[int] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_A = False
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_A = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
_A = model(**_UpperCAmelCase ).loss
loss.backward()
def lowerCAmelCase_ ( self : Any ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_A = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = BeitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ( ) -> int:
'''simple docstring'''
_A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[Any] ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self : Any ):
_A = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_UpperCAmelCase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# prepare bool_masked_pos
_A = torch.ones((1, 196) , dtype=torch.bool ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_A = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , _UpperCAmelCase )
_A = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1E-2 ) )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_UpperCAmelCase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_A = model(**_UpperCAmelCase )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , _UpperCAmelCase )
_A = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
_A = 281
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
_A = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
_UpperCAmelCase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_A = model(**_UpperCAmelCase )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , _UpperCAmelCase )
_A = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
_A = 2_396
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
_A = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_A = model.to(_UpperCAmelCase )
_A = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
_A = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_A = Image.open(ds[0]['file'] )
_A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_A = model(**_UpperCAmelCase )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _UpperCAmelCase )
_A = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
_A = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_UpperCAmelCase , )
else:
_A = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_A = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_A = model.to(_UpperCAmelCase )
_A = BeitImageProcessor(do_resize=_UpperCAmelCase , size=640 , do_center_crop=_UpperCAmelCase )
_A = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_A = Image.open(ds[0]['file'] )
_A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_A = model(**_UpperCAmelCase )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
_A = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
_A = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
_A = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 7 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class FeatureExtractorUtilTester(unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin , ConfigMixin):
@register_to_config
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = ("DownEncoderBlock2D",) , _UpperCAmelCase = ("UpDecoderBlock2D",) , _UpperCAmelCase = (64,) , _UpperCAmelCase = 1 , _UpperCAmelCase = "silu" , _UpperCAmelCase = 3 , _UpperCAmelCase = 32 , _UpperCAmelCase = 256 , _UpperCAmelCase = 32 , _UpperCAmelCase = None , _UpperCAmelCase = 0.18215 , _UpperCAmelCase = "group" , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
__A : Optional[int] = Encoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , down_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , double_z=_UpperCAmelCase , )
__A : Dict = vq_embed_dim if vq_embed_dim is not None else latent_channels
__A : Union[str, Any] = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1)
__A : List[Any] = VectorQuantizer(_UpperCAmelCase , _UpperCAmelCase , beta=0.25 , remap=_UpperCAmelCase , sane_index_shape=_UpperCAmelCase)
__A : Dict = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1)
# pass init params to Decoder
__A : Any = Decoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , up_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , norm_type=_UpperCAmelCase , )
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = True):
'''simple docstring'''
__A : Optional[int] = self.encoder(_UpperCAmelCase)
__A : str = self.quant_conv(_UpperCAmelCase)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_UpperCAmelCase)
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True):
'''simple docstring'''
if not force_not_quantize:
__A ,__A ,__A : Dict = self.quantize(_UpperCAmelCase)
else:
__A : int = h
__A : List[Any] = self.post_quant_conv(_UpperCAmelCase)
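        # with norm_type="spatial" the decoder's SpatialNorm layers are conditioned on the quantized latents; other norm types ignore them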
__A : Union[str, Any] = self.decoder(_UpperCAmelCase , quant if self.config.norm_type == 'spatial' else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = True):
'''simple docstring'''
__A : Any = sample
__A : Optional[int] = self.encode(_UpperCAmelCase).latents
__A : Union[str, Any] = self.decode(_UpperCAmelCase).sample
if not return_dict:
return (dec,)
        return DecoderOutput(sample=_UpperCAmelCase)
| 8 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 1_6
__snake_case = 3_2
def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCamelCase = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
__UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = args.model_name_or_path
set_seed(_lowercase )
__UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
__UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
__UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__UpperCamelCase = 1
__UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
__UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
__UpperCamelCase = 0
# Now we train the model
__UpperCamelCase = evaluate.load('glue' , 'mrpc' )
__UpperCamelCase = 0
__UpperCamelCase = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__UpperCamelCase, __UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
__UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
__UpperCamelCase = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
__UpperCamelCase = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
def _A ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
        '--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, training will throw an error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
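# Illustrative launch command (my own sketch; the script filename is hypothetical, and
# the flags match the argparse options defined above). A DeepSpeed setup can be
# selected beforehand with the `accelerate config` command:
#
#   accelerate launch deepspeed_glue_example.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .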
| 1 | 0 |
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : str , _snake_case : List[Any]=None , _snake_case : Union[str, Any]=20_48 ):
"""simple docstring"""
A__ = config.__dict__
A__ = modal_hidden_size
if num_labels:
A__ = num_labels
| 9 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase (_a ):
@slow
@require_torch
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
__UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase = bertabert.config.encoder.vocab_size
__UpperCamelCase = tokenizer.sep_token_id
__UpperCamelCase = tokenizer.cls_token_id
__UpperCamelCase = 128
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
__UpperCamelCase = train_dataset.select(range(32 ) )
__UpperCamelCase = val_dataset.select(range(16 ) )
__UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 )
__UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 )
__UpperCamelCase = inputs.input_ids
__UpperCamelCase = inputs.attention_mask
__UpperCamelCase = outputs.input_ids
__UpperCamelCase = outputs.input_ids.copy()
__UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__UpperCamelCase = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_: str ):
__UpperCamelCase = pred.label_ids
__UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
__UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,)
# instantiate trainer
__UpperCamelCase = SeqaSeqTrainer(
model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,)
# start training
trainer.train()
| 1 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "ibert"
def __init__( self : str , _A : Any=3_0522 , _A : List[Any]=768 , _A : Any=12 , _A : Union[str, Any]=12 , _A : Union[str, Any]=3072 , _A : Optional[int]="gelu" , _A : int=0.1 , _A : Tuple=0.1 , _A : int=512 , _A : int=2 , _A : Union[str, Any]=0.02 , _A : Dict=1e-12 , _A : Tuple=1 , _A : str=0 , _A : Dict=2 , _A : Union[str, Any]="absolute" , _A : Optional[int]=False , _A : List[Any]="none" , **_A : Dict , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = quant_mode
_UpperCamelCase = force_dequant
class lowerCAmelCase_ ( __lowercase ):
@property
def UpperCamelCase_ ( self : Tuple ):
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 10 |
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : List[Any] = (DDPMScheduler,)
def a__ (self , **A ) -> Optional[int]:
"""simple docstring"""
_a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**A )
return config
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A , beta_end=A )
def a__ (self ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A )
def a__ (self ) -> str:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A )
def a__ (self ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A )
def a__ (self ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A , prediction_type=A , sample_max_value=A , )
def a__ (self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def a__ (self ) -> int:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=A )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**A )
_a = len(A )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
_a = model(A , A )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a = pred_prev_sample
_a = torch.sum(torch.abs(A ) )
_a = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def a__ (self ) -> Dict:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a = scheduler_class(**A )
_a = len(A )
_a = self.dummy_model()
_a = self.dummy_sample_deter
_a = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
_a = model(A , A )
# 2. predict previous mean of sample x_t-1
_a = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a = pred_prev_sample
_a = torch.sum(torch.abs(A ) )
_a = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**A )
_a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A )
_a = scheduler.timesteps
for i, timestep in enumerate(A ):
if i == len(A ) - 1:
_a = -1
else:
_a = timesteps[i + 1]
_a = scheduler.previous_timestep(A )
_a = prev_t.item()
self.assertEqual(A , A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**A )
_a = [100, 87, 50, 51, 0]
with self.assertRaises(A , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=A )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**A )
_a = [100, 87, 50, 1, 0]
_a = len(A )
with self.assertRaises(A , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=A , timesteps=A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**A )
_a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            A , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A )
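# For reference, the denoising loop these tests exercise reduces to the following
# sketch (illustrative only, mirroring the loops above):
#
#   for t in reversed(range(num_train_timesteps)):
#       noise_pred = model(sample, t)
#       sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample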
| 11 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , key=lambda _lowercase : x[column] )
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> List[Any]:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> Tuple:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , _lowercase ):
for j in range(max(0 , i - 6 ) , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(_lowercase , _lowercase )
# recursion
__UpperCamelCase = points_counts // 2
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[:mid] , _lowercase )
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCamelCase = min(_lowercase , _lowercase )
__UpperCamelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowercase )
__UpperCamelCase = dis_between_closest_in_strip(
_lowercase , len(_lowercase ) , _lowercase )
return min(_lowercase , _lowercase )
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = column_based_sort(_lowercase , column=0 )
__UpperCamelCase = column_based_sort(_lowercase , column=1 )
return (
closest_pair_of_points_sqr(
_lowercase , _lowercase , _lowercase )
) ** 0.5
if __name__ == "__main__":
__snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
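    # Expected output for the sample points above (illustrative check): the closest pair
    # is (2, 3) and (3, 4), so the printed distance is sqrt(2) ~= 1.4142135623730951.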
| 1 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = RoCBertTokenizer
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : str = False
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = filter_non_english
def lowercase__ ( self):
'''simple docstring'''
super().setUp()
lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Tuple = i
lowercase__ : Any = i
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""])
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_)
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""")
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowercase__ : Optional[int] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = i
lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
if self.test_rust_tokenizer:
lowercase__ : int = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
def lowercase__ ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowercase__ : List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False
lowercase__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = ["""的""", """人""", """有"""]
lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Any = False
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ : Any = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_)
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
lowercase__ : Optional[int] = """你好,你是谁"""
lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
| 12 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCamelCase (_a ):
_lowercase = """bert"""
def __init__( self: Any,A_: Dict=3_0522,A_: Optional[Any]=768,A_: Union[str, Any]=12,A_: List[Any]=12,A_: Optional[int]=3072,A_: Union[str, Any]="gelu",A_: List[str]=0.1,A_: Dict=0.1,A_: Optional[int]=512,A_: Optional[Any]=2,A_: Union[str, Any]=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=0,A_: List[Any]="absolute",A_: str=True,A_: Union[str, Any]=None,**A_: int,):
'''simple docstring'''
super().__init__(pad_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
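# Note (added for clarity): the dynamic axes above leave the batch dimension, the
# sequence length and, for multiple-choice heads, the choice dimension symbolic when
# this config is used to export the model to ONNX.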
| 1 | 0 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
A__ : Any = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[Union[str, int]] = None
lowerCamelCase : Optional[Union[str, int]] = None
lowerCamelCase : Optional[Union[str, int]] = None
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Any:
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def lowercase_ ( self ) -> int:
return self.major, self.minor, self.patch
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return Version(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return other
raise TypeError(f'{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.' )
def __eq__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
__lowerCamelCase : Union[str, Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : List[Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ )
return self.tuple < other.tuple
def __hash__( self ) -> List[str]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowercase_ ( self ) -> str:
return self.version_str
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> str:
__lowerCamelCase : str = _VERSION_REG.match(UpperCAmelCase_ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict:
return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
| 13 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(26 , _lowercase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
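    # Worked example (illustrative): for column_title "AB", the loop computes
    # (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28.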
| 1 | 0 |
def __UpperCAmelCase ( __a : int ,__a : int ) -> str:
"""simple docstring"""
if not isinstance(__a ,__a ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__a ,__a ) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
_a : List[Any] = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__a )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
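    # Worked example (illustrative): calling the function above with number=1 and
    # iterations=7 returns "1 2 Fizz 4 Buzz Fizz 7 " (trailing space added by the loop).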
| 14 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) )
__UpperCamelCase = qkv_bias
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = 3_64 if 'coco' in model_name else 2_24
__UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict()
elif "opt-6.7b" in model_name:
__UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict()
elif "t5-xl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase )
return config, image_size
@torch.no_grad()
def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0]
__UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase )
__UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval()
__UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase )
assert len(_lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCamelCase = load_demo_image()
__UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
__UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase )
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
__UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase )
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowercase , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "opt" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__UpperCamelCase = hf_model(_lowercase , _lowercase ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase )
else:
# cast to same type
__UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__UpperCamelCase = ''
__UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase )
__UpperCamelCase = original_model.generate({'image': original_pixel_values} )
__UpperCamelCase = hf_model.generate(
_lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowercase )
__UpperCamelCase = input_ids.shape[1]
__UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
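    # Illustrative invocation (the script filename is hypothetical; the flags match the
    # argparse options defined above):
    #
    #   python convert_blip2_checkpoint.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub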
| 1 | 0 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] , __magic_name__ : int ) -> list[int]:
"""simple docstring"""
lowercase__ = 0
lowercase__ = len(__magic_name__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowercase__ = i + 1
else:
lowercase__ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 1_1, 1_5], 9) = }')
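    # Worked example (illustrative): for the sorted list [2, 7, 11, 15] and target 9,
    # the pointers meet at nums[0] + nums[1] == 2 + 7 == 9, so the call returns [0, 1].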
| 15 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(default=_a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
__UpperCamelCase = import_module('tasks' )
try:
__UpperCamelCase = getattr(_lowercase , model_args.task_type )
__UpperCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
    # Prepare the CoNLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[List[str]], List[List[str]]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
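    # Illustrative example (hypothetical values): with label_ids [[0, -100, 1]], the
    # -100 position matches nn.CrossEntropyLoss().ignore_index (special or padded
    # sub-tokens), so it is skipped and both returned lists keep two labels.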
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index: int ) -> None:
    """Entry point for xla_spawn (TPUs)."""
    main()
if __name__ == "__main__":
main()
| 1 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping] ):
    def __init__(self , features=None , device=None , **jnp_array_kwargs ):
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device , Device ):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
                f"device: {str(jax.devices()[0] )}." )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device ): device for device in jax.devices()}

    def _consolidate(self , column ):
        import jax
        import jax.numpy as jnp

        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize(self , value ):
        import jax
        import jax.numpy as jnp

        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()

        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize(self , data_struct ):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self._recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )

    def recursive_tensorize(self , data_struct: dict ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row(self , pa_table: pa.Table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )

    def format_column(self , pa_table: pa.Table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column

    def format_batch(self , pa_table: pa.Table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
return batch | 16 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs ) -> None:
    """solves the multi-process interleaved print problem"""
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
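# Optional extra sanity check (a sketch, not part of the original diagnostics):
# verify that all_gather round-trips the per-process rank across the group.
#
# tensor = torch.tensor([rank], device=device)
# gathered = [torch.zeros_like(tensor) for _ in range(world_size)]
# dist.all_gather(gathered, tensor)
# assert [t.item() for t in gathered] == list(range(world_size))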
| 1 | 0 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float ) -> str:
    """Convert a base-10 integer (possibly negative) to a '0x'-prefixed hexadecimal string."""
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = """"""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = """0x""" + hexadecimal
    if negative:
        hexadecimal = """-""" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
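    # Quick sanity demo (example invocations, not doctests from the original module);
    # the values follow from the conversion above: 255 -> '0xff', -256 -> '-0x100'.
    print(decimal_to_hexadecimal(255))
    print(decimal_to_hexadecimal(-256))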
| 17 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config , items ) -> None:
    """Mark tests that carry no explicit 'integration'/'unit' marker as unit tests."""
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )


def pytest_configure(config ) -> None:
    """Register custom markers."""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory , monkeypatch ) -> None:
    """Redirect all datasets caches to a temporary directory for the test session."""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output() -> None:
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch ) -> None:
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch ) -> None:
    # silence SQLAlchemy 2.0 deprecation warnings during tests
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
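# Usage note (a sketch): the autouse fixtures above apply to every test
# automatically; opt-in fixtures are requested by name, e.g.
#
# def test_sql_roundtrip(set_sqlalchemy_silence_uber_warning):
#     ...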
| 1 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key: str , file: str ):
    """Convert Megatron-DeepSpeed TP/PP weight names into the transformers format."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(R".*layer_(\d*).*" , file )[1] )
    layer_number -= 3
    return F'''h.{layer_number}.''' + key
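# Worked example (assuming DeepSpeed's usual "layer_XX-model_YY-..." file naming):
# layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt")
# extracts layer number 4, subtracts the 3 leading non-transformer layers, and
# returns "h.1.input_layernorm.weight".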
def get_dtype_size(dtype ):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R"[^\d](\d+)$" , str(dtype ) )
    if bit_search is None:
        raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
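# Worked examples: str(torch.float16) ends in "16", so get_dtype_size(torch.float16)
# returns 16 // 8 == 2 bytes; torch.bool is special-cased to 1/8 (one bit per element).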
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    """Convert a Megatron-DeepSpeed BLOOM checkpoint into the transformers format."""
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s: s.startswith("layer" ) and "model_00" in s , file_names ) )

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names ):
            print("Processing file: {}".format(file ) )
            tensors = None

            for i in range(pretraining_tp ):
                # load all TP files
                tp_file = file.replace("model_00" , F'''model_0{i}''' )
                temp = torch.load(os.path.join(bloom_checkpoint_path , tp_file ) , map_location="cpu" )

                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
            json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + "\n"
            f.write(json_config )
    else:
        model = BloomModel(config )

        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s: s.startswith("layer" ) and "model_00" in s , file_names ) )

        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                tp_file = file.replace("model_00" , F'''model_0{i}''' )
                temp = torch.load(os.path.join(bloom_checkpoint_path , tp_file ) , map_location="cpu" )

                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, F'''The keys {missing_keys} are missing'''

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'''Save configuration file to {pytorch_config_dump_path}''' )
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 18 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
        scheduler = DDIMScheduler(
            beta_start=0.00085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=False,set_alpha_to_one=False,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self,device,seed=0 ):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_guided_v2v(self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_consistent(self ):
        pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_single_identical(self ):
        pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def test_num_images_per_prompt(self ):
        pass
    def test_progress_bar(self ):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase ):
    def test_two_step_model(self ):
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576),generator=generator )
        video = video.to('cuda' )

        prompt = 'Spiderman is surfing'

        video_frames = pipe(prompt,video=video,generator=generator,num_inference_steps=3,output_type='pt' ).frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 1 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'
    def setUp(self ):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self , tokenizer ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
    def test_chinese(self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
    def test_basic_tokenizer_lower(self ):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
    def test_basic_tokenizer_lower_strip_accents_false(self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''h\u00E9llo'''])
    def test_basic_tokenizer_lower_strip_accents_true(self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
    def test_basic_tokenizer_lower_strip_accents_default(self ):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
    def test_basic_tokenizer_no_lower(self ):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
    def test_basic_tokenizer_no_lower_strip_accents_false(self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
    def test_basic_tokenizer_no_lower_strip_accents_true(self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''') , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
    def test_basic_tokenizer_respects_never_split_tokens(self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''])
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
    def test_wordpiece_tokenizer(self ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') , ['''[UNK]''', '''runn''', '''##ing'''])
    def test_is_whitespace(self ):
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
    def test_is_control(self ):
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
    def test_is_punctuation(self ):
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
    def test_clean_text(self ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
    @slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''')

        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''])
    def test_change_tokenize_chinese_chars(self ):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char)
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens)
| 19 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 1 | 0 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'

SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self ):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<unk>')
        self.assertEqual(vocab_keys[1] , '<s>')
        self.assertEqual(vocab_keys[-1] , '<pad>')
        self.assertEqual(len(vocab_keys) , 1002)
    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000)
    def test_full_tokenizer(self ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [285, 46, 10, 170, 382] , )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self ):
        return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
    @slow
    def test_tokenization_base_easy_symbols(self ):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self ):
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self ):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False)

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self ):
        # fmt: off
a__ ={'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 20 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
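# Usage note (a sketch): with the _LazyModule registration above, importing e.g.
# AutoformerModel from this package only materializes `modeling_autoformer` (and
# its torch dependency) at attribute-access time, not at package-import time.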
| 1 | 0 |
def max_product_subarray(numbers: list ) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )

        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )

    return max_prod
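# Worked example: for [2, 3, -2, 4] the best contiguous product is 2 * 3 = 6, and
# for [-2, 0, -1] it is 0 (the two negatives are separated by the zero):
# max_product_subarray([2, 3, -2, 4]) -> 6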
| 21 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''

# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')


DUMMY_CONSTANT = '''
{0} = None
'''

DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
'''

DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
def find_backend(line: str ):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init() -> dict:
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('else:' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name: str , backend_name: str ) -> str:
    """Create the code for a dummy object matching `name`'s kind (constant, function, or class)."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files(backend_specific_objects=None ) -> dict:
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False ) -> None:
    """Check (and optionally regenerate) the dummy files against the main init."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path , f'''dummy_{short_names.get(backend , backend )}_objects.py''' )
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , 'r' , encoding='utf-8' , newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '''
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'''diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '''
                    'to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
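# --- editor's sketch (not part of the original row) --------------------------
# The snippet above references DUMMY_CONSTANT / DUMMY_FUNCTION / DUMMY_CLASS
# without defining them. Minimal templates consistent with how
# create_dummy_object() formats them (the exact wording is an assumption, not
# the project's verbatim source):
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""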
| 1 | 0 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
_a = [True] * (num + 1)
_a = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , UpperCamelCase ):
_a = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : Optional[Any] = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
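# --- editor's usage sketch (not part of the original row) --------------------
# A readable restatement of the sieve above (the obfuscated `_a` name hides
# three distinct variables), with a quick self-check:
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    is_prime = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if is_prime[p]:
            # Mark every multiple of p starting at p*p as composite.
            for multiple in range(p * p, num + 1, p):
                is_prime[multiple] = False
        p += 1
    return [n for n in range(2, num + 1) if is_prime[n]]

assert prime_sieve(10) == [2, 3, 5, 7]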
| 22 |
import string
def _A ( _lowercase ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
__UpperCamelCase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCamelCase = string.ascii_uppercase.find(_lowercase )
__UpperCamelCase = num - key
if num < 0:
__UpperCamelCase = num + len(string.ascii_uppercase )
__UpperCamelCase = translated + string.ascii_uppercase[num]
else:
__UpperCamelCase = translated + symbol
print(f'''Decryption using Key #{key}: {translated}''' )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = input('Encrypted message: ' )
__UpperCamelCase = message.upper()
decrypt(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
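# --- editor's sketch (not part of the original row) --------------------------
# decrypt() above prints one candidate per key; a side-effect-free variant
# returning all 26 candidates (same shift-back logic, written in modular form):
import string

def caesar_candidates(message: str) -> list[str]:
    message = message.upper()
    candidates = []
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = (string.ascii_uppercase.find(symbol) - key) % 26
                translated += string.ascii_uppercase[num]
            else:
                translated += symbol
        candidates.append(translated)
    return candidates

assert caesar_candidates("IFMMP")[1] == "HELLO"  # key=1 undoes a shift of 1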
| 1 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase_ = 'A painting of a squirrel eating a burger'
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = sd_pipe.prepare_inputs(_UpperCAmelCase )
UpperCamelCase_ = replicate(_UpperCAmelCase )
UpperCamelCase_ = shard(_UpperCAmelCase )
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = jax.random.split(_UpperCAmelCase , jax.device_count() )
UpperCamelCase_ = sd_pipe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_inference_steps=25 , jit=_UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_ = images[0, 253:256, 253:256, -1]
UpperCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_ = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = 'stabilityai/stable-diffusion-2'
UpperCamelCase_ , UpperCamelCase_ = FlaxDPMSolverMultistepScheduler.from_pretrained(_UpperCAmelCase , subfolder='scheduler' )
UpperCamelCase_ , UpperCamelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
_UpperCAmelCase , scheduler=_UpperCAmelCase , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase_ = scheduler_params
UpperCamelCase_ = 'A painting of a squirrel eating a burger'
UpperCamelCase_ = jax.device_count()
UpperCamelCase_ = num_samples * [prompt]
UpperCamelCase_ = sd_pipe.prepare_inputs(_UpperCAmelCase )
UpperCamelCase_ = replicate(_UpperCAmelCase )
UpperCamelCase_ = shard(_UpperCAmelCase )
UpperCamelCase_ = jax.random.PRNGKey(0 )
UpperCamelCase_ = jax.random.split(_UpperCAmelCase , jax.device_count() )
UpperCamelCase_ = sd_pipe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_inference_steps=25 , jit=_UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_ = images[0, 253:256, 253:256, -1]
UpperCamelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_ = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
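# --- editor's note (sketch, not part of the original row) --------------------
# Both tests follow the standard Flax data-parallel recipe: replicate the
# params across devices, shard the batch, and split one PRNG key per device.
# The bare pattern, with no diffusers dependency:
import jax
import jax.numpy as jnp

def per_device_step(x, rng):
    return x + jax.random.normal(rng, x.shape)

n_devices = jax.device_count()
batch = jnp.zeros((n_devices, 4))                       # leading axis = devices
rngs = jax.random.split(jax.random.PRNGKey(0), n_devices)
out = jax.pmap(per_device_step)(batch, rngs)            # one slice + key each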
| 23 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = KandinskyInpaintPipeline
_lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowercase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowercase = False
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,)
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,)
__UpperCamelCase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ):
'''simple docstring'''
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0,2,3,1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
__UpperCamelCase = np.ones((64, 64),dtype=np.floataa )
__UpperCamelCase = 0
if str(A_ ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ),return_dict=A_,)[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__UpperCamelCase = np.ones((768, 768),dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = 'a hat'
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase, __UpperCamelCase = pipe_prior(
A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple()
__UpperCamelCase = pipeline(
A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',)
__UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_,A_ )
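# --- editor's note (sketch, not part of the original row) --------------------
# The dummy inputs above build the mask as float32 ones and then zero part of
# it; the obfuscated line "`mask[...] = 0`" lost its subscript. A plausible
# reconstruction (the exact slice is an assumption; only dtype and shape are
# from the test):
import numpy as np

mask = np.ones((64, 64), dtype=np.float32)
mask[:32, :32] = 0.0  # hypothetical region; the original zeroed some region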
| 1 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
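# --- editor's note (sketch, not part of the original row) --------------------
# This is the usual alias-and-warn deprecation shim: the old class name keeps
# working but emits a FutureWarning on construction. The pattern in isolation
# (names here are illustrative, not from the library):
import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)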
| 24 |
from typing import Any
class __lowerCamelCase :
def __init__( self: int,A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Any ):
'''simple docstring'''
return F'''Node({self.data})'''
class __lowerCamelCase :
def __init__( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = None
def __iter__( self: int ):
'''simple docstring'''
__UpperCamelCase = self.head
while node:
yield node.data
__UpperCamelCase = node.next
def __len__( self: List[str] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self: Any ):
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __getitem__( self: int,A_: int ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: int,A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__UpperCamelCase = self.head
for _ in range(A_ ):
__UpperCamelCase = current.next
__UpperCamelCase = data
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
self.insert_nth(len(self ),A_ )
def snake_case_ ( self: List[Any],A_: Any ):
'''simple docstring'''
self.insert_nth(0,A_ )
def snake_case_ ( self: Optional[Any],A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__UpperCamelCase = Node(A_ )
if self.head is None:
__UpperCamelCase = new_node
elif index == 0:
__UpperCamelCase = self.head # link new_node to head
__UpperCamelCase = new_node
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = new_node
    def snake_case_ ( self: str ): # print every node's data
'''simple docstring'''
print(self )
def snake_case_ ( self: int ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case_ ( self: str ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case_ ( self: Any,A_: int = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__UpperCamelCase = self.head # default first node
if index == 0:
__UpperCamelCase = self.head.next
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next.next
return delete_node.data
def snake_case_ ( self: Any ):
'''simple docstring'''
return self.head is None
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = self.head
while current:
# Store the current node's next node.
__UpperCamelCase = current.next
# Make the current node's next point backwards
__UpperCamelCase = prev
# Make the previous node be the current node
__UpperCamelCase = current
# Make the current node the next node (to progress iteration)
__UpperCamelCase = next_node
# Return prev in order to put the head at the end
__UpperCamelCase = prev
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> List[str]:
"""simple docstring"""
from doctest import testmod
testmod()
__UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
__UpperCamelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
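# --- editor's usage sketch (not part of the original row) --------------------
# Both classes above are obfuscated to `__lowerCamelCase`; reading them as
# Node and LinkedList, a non-interactive demonstration would be:
#
#   lst = LinkedList()
#   for value in (1, 2, 3):
#       lst.insert_tail(value)
#   assert str(lst) == "1->2->3"
#   lst.reverse()
#   assert str(lst) == "3->2->1"
#   assert lst.delete_head() == 3 and lst.delete_tail() == 1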
| 1 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
a_ = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
a_ = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
a_ = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__A )
class _UpperCamelCase :
'''simple docstring'''
def __call__( self : Union[str, Any] , a : str , a : Optional[str] = None , a : Optional[str] = None , a : Union[bool, str] = False , a : Union[bool, str] = False , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = None , **a : Any , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
a , padding=a , truncation=a , max_length=a , return_tensors=a , return_attention_mask=a , **a , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE : Tuple = titles if texts is None else texts
return super().__call__(
a , a , padding=a , truncation=a , max_length=a , return_tensors=a , return_attention_mask=a , **a , )
SCREAMING_SNAKE_CASE : Dict = titles if not isinstance(a , a ) else [titles]
SCREAMING_SNAKE_CASE : Optional[int] = texts if not isinstance(a , a ) else [texts]
SCREAMING_SNAKE_CASE : str = len(a )
SCREAMING_SNAKE_CASE : Optional[Any] = questions if not isinstance(a , a ) else [questions] * n_passages
if len(a ) != len(a ):
raise ValueError(
F"There should be as many titles than texts but got {len(a )} titles and {len(a )} texts." )
SCREAMING_SNAKE_CASE : Tuple = super().__call__(a , a , padding=a , truncation=a )["input_ids"]
SCREAMING_SNAKE_CASE : Optional[int] = super().__call__(a , add_special_tokens=a , padding=a , truncation=a )["input_ids"]
SCREAMING_SNAKE_CASE : int = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(a , a )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE : Dict = attention_mask
return self.pad(a , padding=a , max_length=a , return_tensors=a )
def __UpperCamelCase ( self : List[str] , a : BatchEncoding , a : DPRReaderOutput , a : int = 16 , a : int = 64 , a : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = reader_input["input_ids"]
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = reader_output[:3]
SCREAMING_SNAKE_CASE : Any = len(a )
SCREAMING_SNAKE_CASE : Any = sorted(range(a ) , reverse=a , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE : Union[str, Any] = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE : Optional[int] = len(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=a , top_spans=a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=a , start_index=a , end_index=a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __UpperCamelCase ( self : str , a : List[int] , a : List[int] , a : int , a : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for start_index, start_score in enumerate(a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE : Tuple = sorted(a , key=lambda a : x[1] , reverse=a )
SCREAMING_SNAKE_CASE : str = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]" )
SCREAMING_SNAKE_CASE : int = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(a ) == top_spans:
break
return chosen_span_intervals
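# --- editor's sketch (not part of the original row) --------------------------
# The span-selection method above scores every (start, start+length) pair and
# keeps the top-scoring non-overlapping spans. The core idea in isolation,
# with readable names and a tiny self-check:
def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scored = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scored.append(((s, s + length), s_score + e_score))
    scored.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (s, e), _ in scored:
        # Skip spans nested inside (or containing) an already-chosen span.
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

assert best_spans([0.1, 0.9], [0.2, 0.8], max_answer_length=2, top_spans=1) == [(1, 1)]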
@add_end_docstrings(__A )
class _UpperCamelCase ( __A , __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =READER_PRETRAINED_INIT_CONFIGURATION
    lowerCamelCase__ =['input_ids', 'attention_mask']
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
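# --- editor's note (sketch, not part of the original row) --------------------
# _LazyModule defers the heavy imports declared in _import_structure until an
# attribute is first accessed. A toy equivalent of the mechanism (not the
# actual transformers implementation):
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)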
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
__snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
__snake_case = {value: key for key, value in encode_dict.items()}
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _A ( _lowercase ) -> str:
"""simple docstring"""
if set(_lowercase ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__UpperCamelCase = ''
for word in coded.split():
while len(_lowercase ) != 0:
decoded += decode_dict[word[:5]]
__UpperCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
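# --- editor's usage sketch (not part of the original row) --------------------
# Both functions above are obfuscated to the same name `_A`; reading them as
# encode() and decode(), a round-trip check of this (nonstandard) Baconian
# table: 'j' -> 'BBBAA' and 'v' -> 'BBBAB' keep all 26 codes unique.
#
#   assert encode("flint") == "AABABABABAABAAAABBAABAABA"
#   assert decode("AABBBAABAAABABAABABAABBAB") == "hello"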
| 1 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCAmelCase( ) -> Optional[int]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCAmelCase( ) -> List[str]:
"""simple docstring"""
_A = 'mock-s3-bucket'
_A = F"s3://{mock_bucket}"
_A = extract_path_from_uri(_SCREAMING_SNAKE_CASE )
assert dataset_path.startswith('s3://' ) is False
_A = './local/path'
_A = extract_path_from_uri(_SCREAMING_SNAKE_CASE )
assert dataset_path == new_dataset_path
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_A = is_remote_filesystem(_SCREAMING_SNAKE_CASE )
assert is_remote is True
_A = fsspec.filesystem('file' )
_A = is_remote_filesystem(_SCREAMING_SNAKE_CASE )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , _SCREAMING_SNAKE_CASE )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
_A = input_paths[compression_fs_class.protocol]
if input_path is None:
_A = F"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_SCREAMING_SNAKE_CASE )
_A = fsspec.filesystem(compression_fs_class.protocol , fo=_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_A = os.path.basename(_SCREAMING_SNAKE_CASE )
_A = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f, open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_A = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
_A = compressed_file_paths[protocol]
_A = 'dataset.jsonl'
_A = F"{protocol}://{member_file_path}::{compressed_file_path}"
_A, *_A = fsspec.get_fs_token_paths(_SCREAMING_SNAKE_CASE )
assert fs.isfile(_SCREAMING_SNAKE_CASE )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = hf_api.dataset_info(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
_A = HfFileSystem(repo_info=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(_SCREAMING_SNAKE_CASE ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def __lowerCAmelCase( ) -> str:
"""simple docstring"""
_A = 'bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , clobber=_SCREAMING_SNAKE_CASE )
with pytest.warns(_SCREAMING_SNAKE_CASE ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_SCREAMING_SNAKE_CASE ) == 1
assert (
str(warning_info[0].message )
== F"A filesystem protocol was already set for {protocol} and will be overwritten."
)
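# --- editor's sketch (not part of the original row) --------------------------
# The zip/gzip test above leans on fsspec URL chaining:
# "<protocol>://<member>::<archive>". The same mechanism standalone (paths
# here are placeholders):
import fsspec

def read_member_of_zip(archive_path: str, member: str) -> str:
    with fsspec.open(f"zip://{member}::{archive_path}", "rt") as f:
        return f.read()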
| 27 |
from collections.abc import Generator
from math import sin
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if len(_lowercase ) != 32:
raise ValueError('Input must be of length 32' )
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '08x' )[-8:]
__UpperCamelCase = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = B''
for char in message:
bit_string += format(_lowercase , '08b' ).encode('utf-8' )
__UpperCamelCase = format(len(_lowercase ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_lowercase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _A ( _lowercase ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(_lowercase ) % 5_12 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(_lowercase ) , 5_12 ):
__UpperCamelCase = bit_string[pos : pos + 5_12]
__UpperCamelCase = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _A ( _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__UpperCamelCase = format(_lowercase , '032b' )
__UpperCamelCase = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_lowercase , 2 )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _A ( _lowercase ) -> bytes:
"""simple docstring"""
__UpperCamelCase = preprocess(_lowercase )
__UpperCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__UpperCamelCase = 0X67_45_23_01
__UpperCamelCase = 0Xef_cd_ab_89
__UpperCamelCase = 0X98_ba_dc_fe
__UpperCamelCase = 0X10_32_54_76
__UpperCamelCase = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_lowercase ):
__UpperCamelCase = aa
__UpperCamelCase = ba
__UpperCamelCase = ca
__UpperCamelCase = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__UpperCamelCase = d ^ (b & (c ^ d))
__UpperCamelCase = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__UpperCamelCase = c ^ (d & (b ^ c))
__UpperCamelCase = (5 * i + 1) % 16
elif i <= 47:
__UpperCamelCase = b ^ c ^ d
__UpperCamelCase = (3 * i + 5) % 16
else:
__UpperCamelCase = c ^ (b | not_aa(_lowercase ))
__UpperCamelCase = (7 * i) % 16
__UpperCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32
__UpperCamelCase = d
__UpperCamelCase = c
__UpperCamelCase = b
__UpperCamelCase = sum_aa(_lowercase , left_rotate_aa(_lowercase , shift_amounts[i] ) )
# Add hashed chunk to running total
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = sum_aa(_lowercase , _lowercase )
__UpperCamelCase = reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase ) + reformat_hex(_lowercase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
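# --- editor's usage sketch (not part of the original row) --------------------
# The final `_A` above is the digest function (every helper was obfuscated to
# the same name). Reading it as md5_me, it should agree with hashlib:
#
#   import hashlib
#   assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode()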
| 1 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase__( __UpperCamelCase: Any ):
"""simple docstring"""
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE : List[Any] = key.replace('heads.cmd.mim_head.cls.predictions' ,'mmm_image_head' )
SCREAMING_SNAKE_CASE : int = key.replace('heads.cmd.mlm_head.cls.predictions' ,'mmm_text_head' )
SCREAMING_SNAKE_CASE : str = key.replace('heads.cmd.itm_head.cls' ,'itm_head' )
SCREAMING_SNAKE_CASE : Dict = key.replace('heads.cmd.itm_head.pooler' ,'itm_head.pooler' )
SCREAMING_SNAKE_CASE : Dict = key.replace('heads.cmd.clip_head.logit_scale' ,'flava.logit_scale' )
SCREAMING_SNAKE_CASE : List[Any] = key.replace('heads.fairseq_mlm.cls.predictions' ,'mlm_head' )
SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('heads.imagenet.mim_head.cls.predictions' ,'mim_head' )
SCREAMING_SNAKE_CASE : List[Any] = key.replace('mm_text_projection' ,'flava.text_to_mm_projection' )
SCREAMING_SNAKE_CASE : List[str] = key.replace('mm_image_projection' ,'flava.image_to_mm_projection' )
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('image_encoder.module' ,'flava.image_model' )
SCREAMING_SNAKE_CASE : Optional[int] = key.replace('text_encoder.module' ,'flava.text_model' )
SCREAMING_SNAKE_CASE : Dict = key.replace('mm_encoder.module.encoder.cls_token' ,'flava.multimodal_model.cls_token' )
SCREAMING_SNAKE_CASE : Optional[int] = key.replace('mm_encoder.module' ,'flava.multimodal_model' )
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('text_projection' ,'flava.text_projection' )
SCREAMING_SNAKE_CASE : Optional[int] = key.replace('image_projection' ,'flava.image_projection' )
SCREAMING_SNAKE_CASE : Dict = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE : int = value
return upgrade
@torch.no_grad()
def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: str ,__UpperCamelCase: Any ,__UpperCamelCase: Optional[int]=None ):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE : int = FlavaConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlavaConfig()
SCREAMING_SNAKE_CASE : str = FlavaForPreTraining(__UpperCamelCase ).eval()
SCREAMING_SNAKE_CASE : int = convert_dalle_checkpoint(__UpperCamelCase ,__UpperCamelCase ,save_checkpoint=__UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : int = torch.load(__UpperCamelCase ,map_location='cpu' )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase ,map_location='cpu' )
SCREAMING_SNAKE_CASE : int = upgrade_state_dict(__UpperCamelCase ,__UpperCamelCase )
hf_model.load_state_dict(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = hf_model.state_dict()
SCREAMING_SNAKE_CASE : Optional[int] = count_parameters(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase_ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
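# --- editor's note (not part of the original row) -----------------------------
# The torch.allclose() above is a conversion sanity check: count_parameters()
# sums all parameter values (skipping 'encoder.embeddings' keys), and the
# converted HF model's total should match the original checkpoint plus the
# codebook, up to float accumulation error (atol=1e-3).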
| 28 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class __lowerCamelCase :
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowerCamelCase :
def __init__( self: Any,A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ )
__UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ )
__UpperCamelCase = [self.start]
__UpperCamelCase = []
__UpperCamelCase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A_ )
self.closed_nodes.append(A_ )
__UpperCamelCase = self.get_successors(A_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A_ )
else:
self.open_nodes.append(A_ )
return [self.start.pos]
def snake_case_ ( self: int,A_: Node ):
'''simple docstring'''
__UpperCamelCase = []
for action in delta:
__UpperCamelCase = parent.pos_x + action[1]
__UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) )
return successors
def snake_case_ ( self: Any,A_: Node | None ):
'''simple docstring'''
__UpperCamelCase = node
__UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCamelCase = current_node.parent
path.reverse()
return path
class __lowerCamelCase :
def __init__( self: List[Any],A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCamelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A_,A_ )
self.fwd_astar.closed_nodes.append(A_ )
self.bwd_astar.closed_nodes.append(A_ )
__UpperCamelCase = current_bwd_node
__UpperCamelCase = current_fwd_node
__UpperCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A_ ),
self.bwd_astar: self.bwd_astar.get_successors(A_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A_ )
else:
astar.open_nodes.append(A_ )
return [self.fwd_astar.start.pos]
def snake_case_ ( self: List[str],A_: Node,A_: Node ):
'''simple docstring'''
__UpperCamelCase = self.fwd_astar.retrace_path(A_ )
__UpperCamelCase = self.bwd_astar.retrace_path(A_ )
bwd_path.pop()
bwd_path.reverse()
__UpperCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 1 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
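# A minimal standalone sketch of the iterative branch above, assuming jiwer is
# installed: errors and reference lengths are summed per pair, so the corpus
# WER weights long references more than averaging per-sentence scores would.
from jiwer import compute_measures

def corpus_wer(predictions, references):
    errors, total = 0, 0
    for pred, ref in zip(predictions, references):
        measures = compute_measures(ref, pred)  # jiwer order is (truth, hypothesis)
        errors += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]  # = N
    return errors / total

# corpus_wer(["this is the prediction"], ["this is the reference"])  # 1 substitution / 4 words = 0.25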
| 29 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
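# For reference, a minimal sketch of the round trip these tests exercise; the
# repo id and token below are placeholders, not values from the test suite.
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000)
extractor.push_to_hub("my-user/my-feature-extractor", use_auth_token="hf_...")  # hypothetical repo/token
reloaded = Wav2Vec2FeatureExtractor.from_pretrained("my-user/my-feature-extractor")
assert reloaded.to_dict() == extractor.to_dict()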
| 1 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 1_6
__snake_case = 3_2
def _A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCamelCase = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_lowercase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
__UpperCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = args.model_name_or_path
set_seed(_lowercase )
__UpperCamelCase, __UpperCamelCase = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
__UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCamelCase = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
__UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__UpperCamelCase = 1
__UpperCamelCase = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
__UpperCamelCase = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__UpperCamelCase = 0
# Now we train the model
__UpperCamelCase = evaluate.load('glue' , 'mrpc' )
__UpperCamelCase = 0
__UpperCamelCase = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__UpperCamelCase, __UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
__UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
__UpperCamelCase = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
__UpperCamelCase = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
def _A ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowercase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowercase , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=_lowercase , default=_lowercase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_lowercase , default=3 , help='Number of train epochs.' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
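# The duplicate-trimming step from the eval loop above, isolated as a sketch:
# distributed samplers pad the final batch by repeating samples, so on the
# last step each process keeps only the slice that brings the global count up
# to len(dataset).
def trim_duplicates(predictions, references, samples_seen, dataset_len, is_last_step):
    if is_last_step:
        predictions = predictions[: dataset_len - samples_seen]
        references = references[: dataset_len - samples_seen]
    return predictions, references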
| 1 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Tuple: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCAmelCase_ ( ) -> str:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
SCREAMING_SNAKE_CASE_ = [1, 2, 3]
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=2 )
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [1, 2]
SCREAMING_SNAKE_CASE_ = {'a': 1, 'b': 2}
SCREAMING_SNAKE_CASE_ = {'a': [1, 2], 'b': [3, 4]}
SCREAMING_SNAKE_CASE_ = {'a': {'1': 1}, 'b': 2}
SCREAMING_SNAKE_CASE_ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
SCREAMING_SNAKE_CASE_ = [2, 3]
SCREAMING_SNAKE_CASE_ = {'a': 2, 'b': 3}
SCREAMING_SNAKE_CASE_ = {'a': [2, 3], 'b': [4, 5]}
SCREAMING_SNAKE_CASE_ = {'a': {'1': 2}, 'b': 3}
SCREAMING_SNAKE_CASE_ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa | 31 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCamelCase (_a ):
@slow
@require_torch
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny','prajjwal1/bert-tiny' )
__UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase = bertabert.config.encoder.vocab_size
__UpperCamelCase = tokenizer.sep_token_id
__UpperCamelCase = tokenizer.cls_token_id
__UpperCamelCase = 128
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='train[:1%]' )
__UpperCamelCase = datasets.load_dataset('cnn_dailymail','3.0.0',split='validation[:1%]' )
__UpperCamelCase = train_dataset.select(range(32 ) )
__UpperCamelCase = val_dataset.select(range(16 ) )
__UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(A_: Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__UpperCamelCase = tokenizer(batch['article'],padding='max_length',truncation=A_,max_length=512 )
__UpperCamelCase = tokenizer(batch['highlights'],padding='max_length',truncation=A_,max_length=128 )
__UpperCamelCase = inputs.input_ids
__UpperCamelCase = inputs.attention_mask
__UpperCamelCase = outputs.input_ids
__UpperCamelCase = outputs.input_ids.copy()
__UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__UpperCamelCase = outputs.attention_mask
assert all(len(A_ ) == 512 for x in inputs.input_ids )
assert all(len(A_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(A_: str ):
__UpperCamelCase = pred.label_ids
__UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
__UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
train_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
# same for validation dataset
__UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs,batched=A_,batch_size=A_,remove_columns=['article', 'highlights'],)
val_dataset.set_format(
type='torch',columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],)
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=A_,per_device_train_batch_size=A_,per_device_eval_batch_size=A_,predict_with_generate=A_,evaluation_strategy='steps',do_train=A_,do_eval=A_,warmup_steps=0,eval_steps=2,logging_steps=2,)
# instantiate trainer
__UpperCamelCase = SeqaSeqTrainer(
model=A_,args=A_,compute_metrics=_compute_metrics,train_dataset=A_,eval_dataset=A_,tokenizer=A_,)
# start training
trainer.train()
| 1 | 0 |
UpperCAmelCase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> bytes:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = ''''''.join(bin(SCREAMING_SNAKE_CASE_ )[2:].zfill(8 ) for byte in data )
_UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
_UpperCAmelCase = B'''=''' * ((6 - len(SCREAMING_SNAKE_CASE_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE_ ) % 6)
else:
_UpperCAmelCase = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 6 ) ).encode()
+ padding
)
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = (
'''argument should be a bytes-like object or ASCII string, '''
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(SCREAMING_SNAKE_CASE_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
_UpperCAmelCase = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
_UpperCAmelCase = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_UpperCAmelCase = encoded_data[:-padding]
_UpperCAmelCase = ''''''.join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_UpperCAmelCase = ''''''.join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )
_UpperCAmelCase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
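# A quick round-trip check for the logic above, using the standard library as
# the reference implementation (both helpers in this sample share a
# placeholder name, so stdlib calls stand in for them here).
import base64

data = b"Hello World!"
assert base64.b64encode(data) == b"SGVsbG8gV29ybGQh"
assert base64.b64decode(b"SGVsbG8gV29ybGQh") == data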
def _A ( _lowercase = 1_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any:
snake_case__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
snake_case__ , snake_case__ = emb.weight.shape
snake_case__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
snake_case__ = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
snake_case__ = torch.load(__lowerCAmelCase , map_location='''cpu''' )
snake_case__ = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
snake_case__ = mam_aaa['''model''']
remove_ignore_keys_(__lowerCAmelCase )
snake_case__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
snake_case__ = MaMaaaConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
snake_case__ = state_dict['''decoder.embed_tokens.weight''']
snake_case__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
snake_case__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase__ : Optional[int] = parser.parse_args()
    lowerCamelCase__ : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
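# What make_linear_from_emb does, reduced to a sketch: the output projection
# is a bias-free linear layer whose weight tensor aliases the decoder
# embedding, keeping input and output embeddings tied after loading.
import torch
from torch import nn

emb = nn.Embedding(10, 4)           # (vocab_size, emb_size)
lin = nn.Linear(4, 10, bias=False)  # weight shape (10, 4) matches emb.weight
lin.weight.data = emb.weight.data
assert torch.equal(lin.weight, emb.weight)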
| 33 |
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _A ( _lowercase , _lowercase=0 ) -> Dict:
"""simple docstring"""
return sorted(_lowercase , key=lambda _lowercase : x[column] )
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> List[Any]:
"""simple docstring"""
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase=float('inf' ) ) -> Tuple:
"""simple docstring"""
for i in range(min(6 , points_counts - 1 ) , _lowercase ):
for j in range(max(0 , i - 6 ) , _lowercase ):
__UpperCamelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase = current_dis
return min_dis
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(_lowercase , _lowercase )
# recursion
__UpperCamelCase = points_counts // 2
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[:mid] , _lowercase )
__UpperCamelCase = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCamelCase = min(_lowercase , _lowercase )
__UpperCamelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowercase )
__UpperCamelCase = dis_between_closest_in_strip(
_lowercase , len(_lowercase ) , _lowercase )
return min(_lowercase , _lowercase )
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = column_based_sort(_lowercase , column=0 )
__UpperCamelCase = column_based_sort(_lowercase , column=1 )
return (
closest_pair_of_points_sqr(
_lowercase , _lowercase , _lowercase )
) ** 0.5
if __name__ == "__main__":
__snake_case = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
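# A brute-force cross-check for the divide-and-conquer routine above: on
# small inputs the O(n^2) scan must return the same minimum distance.
def closest_pair_brute_force(points):
    best = float("inf")
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            dx = points[i][0] - points[j][0]
            dy = points[i][1] - points[j][1]
            best = min(best, dx * dx + dy * dy)
    return best**0.5

# closest_pair_brute_force(points) should equal closest_pair_of_points(points, len(points))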
| 1 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE_ = 'pt'
elif is_tf_available():
SCREAMING_SNAKE_CASE_ = 'tf'
else:
SCREAMING_SNAKE_CASE_ = 'jax'
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = PerceiverTokenizer
A_ = False
def UpperCAmelCase__ ( self) -> Optional[int]:
super().setUp()
UpperCamelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def UpperCAmelCase__ ( self) -> Any:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''')
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=2_0 , lowerCamelCase_=5) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
UpperCamelCase = []
for i in range(len(lowerCamelCase_)):
try:
UpperCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_)
except UnicodeDecodeError:
pass
toks.append((i, tok))
UpperCamelCase = list(filter(lambda lowerCamelCase_: re.match(R'''^[ a-zA-Z]+$''' , t[1]) , lowerCamelCase_))
UpperCamelCase = list(filter(lambda lowerCamelCase_: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase_) , lowerCamelCase_))
if max_length is not None and len(lowerCamelCase_) > max_length:
UpperCamelCase = toks[:max_length]
if min_length is not None and len(lowerCamelCase_) < min_length and len(lowerCamelCase_) > 0:
while len(lowerCamelCase_) < min_length:
UpperCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
UpperCamelCase = [t[0] for t in toks]
# Ensure consistency
UpperCamelCase = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_)
if " " not in output_txt and len(lowerCamelCase_) > 1:
UpperCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_)
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_)
)
if with_prefix_space:
UpperCamelCase = ''' ''' + output_txt
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
return output_txt, output_ids
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = '''Unicode €.'''
UpperCamelCase = tokenizer(lowerCamelCase_)
UpperCamelCase = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_)
# decoding
UpperCamelCase = tokenizer.decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , '''[CLS]Unicode €.[SEP]''')
UpperCamelCase = tokenizer('''e è é ê ë''')
UpperCamelCase = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCamelCase_)
# decoding
UpperCamelCase = tokenizer.decode(lowerCamelCase_)
self.assertEqual(lowerCamelCase_ , '''[CLS]e è é ê ë[SEP]''')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''[CLS]e è é ê ë[SEP]''')
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCamelCase = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
if FRAMEWORK != "jax":
UpperCamelCase = list(batch.input_ids.numpy()[0])
else:
UpperCamelCase = list(batch.input_ids.tolist()[0])
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual((2, 3_8) , batch.input_ids.shape)
self.assertEqual((2, 3_8) , batch.attention_mask.shape)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCamelCase_)
self.assertIn('''attention_mask''' , lowerCamelCase_)
self.assertNotIn('''decoder_input_ids''' , lowerCamelCase_)
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.perceiver_tokenizer
UpperCamelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCamelCase = tokenizer(
text_target=lowerCamelCase_ , max_length=3_2 , padding='''max_length''' , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_)
self.assertEqual(3_2 , targets['''input_ids'''].shape[1])
def UpperCAmelCase__ ( self) -> Optional[int]:
# safety check on max_len default value so we are sure the test works
UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length , 4_2)
# Now let's start the test
UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = ''' He is very happy, UNwant\u00E9d,running'''
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
tokenizer.save_pretrained(lowerCamelCase_)
UpperCamelCase = tokenizer.__class__.from_pretrained(lowerCamelCase_)
UpperCamelCase = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
shutil.rmtree(lowerCamelCase_)
UpperCamelCase = self.get_tokenizers(model_max_length=4_2)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''])
UpperCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''')
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
tokenizer.save_pretrained(lowerCamelCase_)
UpperCamelCase = tokenizer.__class__.from_pretrained(lowerCamelCase_)
UpperCamelCase = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 4_2)
UpperCamelCase = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=4_3)
self.assertEqual(tokenizer.model_max_length , 4_3)
shutil.rmtree(lowerCamelCase_)
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_)
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
UpperCamelCase = json.load(lowerCamelCase_)
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
UpperCamelCase = json.load(lowerCamelCase_)
UpperCamelCase = [F'<extra_id_{i}>' for i in range(1_2_5)]
UpperCamelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCamelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_)
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase = tokenizer_class.from_pretrained(
lowerCamelCase_ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCamelCase_)]
UpperCamelCase = tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8]) , '''�''')
def UpperCAmelCase__ ( self) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self) -> Union[str, Any]:
pass
def UpperCAmelCase__ ( self) -> List[Any]:
pass
def UpperCAmelCase__ ( self) -> Tuple:
pass
def UpperCAmelCase__ ( self) -> Optional[int]:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
UpperCamelCase = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_)
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}'):
UpperCamelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
UpperCamelCase = tokenizer.convert_tokens_to_string(lowerCamelCase_)
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_) | 34 |
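# What the expected ids in these tests encode, as a standalone sketch: the
# Perceiver tokenizer maps raw UTF-8 bytes shifted by the number of special
# tokens (6 here), with [CLS]=4 and [SEP]=5 framing the sequence.
def perceiver_like_encode(text: str, offset: int = 6, cls_id: int = 4, sep_id: int = 5) -> list:
    return [cls_id] + [b + offset for b in text.encode("utf-8")] + [sep_id]

# perceiver_like_encode("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]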
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCamelCase (_a ):
_lowercase = """bert"""
def __init__( self: Any,A_: Dict=3_0522,A_: Optional[Any]=768,A_: Union[str, Any]=12,A_: List[Any]=12,A_: Optional[int]=3072,A_: Union[str, Any]="gelu",A_: List[str]=0.1,A_: Dict=0.1,A_: Optional[int]=512,A_: Optional[Any]=2,A_: Union[str, Any]=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=0,A_: List[Any]="absolute",A_: str=True,A_: Union[str, Any]=None,**A_: int,):
'''simple docstring'''
super().__init__(pad_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
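# Written out for the default task, the property above yields the mapping
# below; each entry names a model input and marks which axes stay dynamic
# (variable-length) when exporting to ONNX.
from collections import OrderedDict

expected_inputs = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "sequence"}),
        ("attention_mask", {0: "batch", 1: "sequence"}),
        ("token_type_ids", {0: "batch", 1: "sequence"}),
    ]
)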
| 1 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a_ :Tuple = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
a_ :List[str] = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def a ( A__ , A__=False ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = create_model(
'''HTSAT-tiny''' , '''roberta''' , A__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=A__ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def a ( A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Optional[int] = r'''.*sequential.(\d+).*'''
SCREAMING_SNAKE_CASE__ : Any = r'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE__ : Dict = key.replace(A__ , A__ )
if re.match(A__ , A__ ):
# replace sequential layers with list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.match(A__ , A__ ).group(1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(A__ )//3}.linear.""" )
elif re.match(A__ , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = int(re.match(A__ , A__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
SCREAMING_SNAKE_CASE__ : Optional[int] = 1 if projecton_layer == 0 else 2
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
SCREAMING_SNAKE_CASE__ : Optional[int] = value
SCREAMING_SNAKE_CASE__ : Tuple = mixed_qkv.size(0 ) // 3
SCREAMING_SNAKE_CASE__ : Optional[int] = mixed_qkv[:qkv_dim]
SCREAMING_SNAKE_CASE__ : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
SCREAMING_SNAKE_CASE__ : Optional[Any] = mixed_qkv[qkv_dim * 2 :]
SCREAMING_SNAKE_CASE__ : str = query_layer
SCREAMING_SNAKE_CASE__ : Dict = key_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value_layer
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return model_state_dict
def a ( A__ , A__ , A__ , A__=False ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = init_clap(A__ , enable_fusion=A__ )
clap_model.eval()
SCREAMING_SNAKE_CASE__ : int = clap_model.state_dict()
SCREAMING_SNAKE_CASE__ : Optional[Any] = rename_state_dict(A__ )
SCREAMING_SNAKE_CASE__ : str = ClapConfig()
SCREAMING_SNAKE_CASE__ : List[Any] = enable_fusion
SCREAMING_SNAKE_CASE__ : List[Any] = ClapModel(A__ )
# ignore the spectrogram embedding layer
model.load_state_dict(A__ , strict=A__ )
model.save_pretrained(A__ )
transformers_config.save_pretrained(A__ )
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
a_ :Any = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
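# The fused-qkv split above, in isolation: stacked attention weights hold
# query, key and value along dim 0, so each projection is one contiguous third.
import torch

mixed_qkv = torch.randn(3 * 8, 8)  # (3 * hidden, hidden) fused weight
qkv_dim = mixed_qkv.size(0) // 3
query, key, value = mixed_qkv[:qkv_dim], mixed_qkv[qkv_dim : 2 * qkv_dim], mixed_qkv[2 * qkv_dim :]
assert query.shape == key.shape == value.shape == (8, 8)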
| 35 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(26 , _lowercase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger('''transformers.models.speecht5''')
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : int ) -> Any:
'''simple docstring'''
hf_model.apply_weight_norm()
snake_case : Any = checkpoint["""input_conv.weight_g"""]
snake_case : Any = checkpoint["""input_conv.weight_v"""]
snake_case : Optional[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
snake_case : Optional[Any] = checkpoint[f"""upsamples.{i}.1.weight_g"""]
snake_case : List[str] = checkpoint[f"""upsamples.{i}.1.weight_v"""]
snake_case : Any = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case : Any = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
snake_case : Union[str, Any] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
snake_case : str = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
snake_case : Any = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
snake_case : List[Any] = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
snake_case : str = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
snake_case : Union[str, Any] = checkpoint["""output_conv.1.weight_g"""]
snake_case : Tuple = checkpoint["""output_conv.1.weight_v"""]
snake_case : Dict = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase ( __A : List[Any] , __A : Optional[Any] , __A : List[str] , __A : Optional[Any]=None , __A : List[str]=None , ) -> str:
'''simple docstring'''
if config_path is not None:
snake_case : str = SpeechTaHifiGanConfig.from_pretrained(__A )
else:
snake_case : Tuple = SpeechTaHifiGanConfig()
snake_case : Optional[Any] = SpeechTaHifiGan(__A )
snake_case : str = torch.load(__A )
load_weights(orig_checkpoint["""model"""]["""generator"""] , __A , __A )
snake_case : List[Any] = np.load(__A )
snake_case : Tuple = stats[0].reshape(-1 )
snake_case : str = stats[1].reshape(-1 )
snake_case : Optional[int] = torch.from_numpy(__A ).float()
snake_case : Tuple = torch.from_numpy(__A ).float()
model.save_pretrained(__A )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(__A )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__lowercase : List[str] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
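# The weight-norm pattern used by load_weights above, reduced to one layer:
# weight normalisation stores direction (weight_v) and magnitude (weight_g),
# so the norm is applied before copying those tensors in and removed
# afterwards to fold them back into a plain weight.
import torch
from torch import nn
from torch.nn.utils import weight_norm, remove_weight_norm

conv = weight_norm(nn.Conv1d(4, 4, kernel_size=3))
conv.weight_g.data = torch.ones_like(conv.weight_g)   # stands in for checkpoint["...weight_g"]
conv.weight_v.data = torch.randn_like(conv.weight_v)  # stands in for checkpoint["...weight_v"]
remove_weight_norm(conv)  # conv.weight is materialised again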
| 36 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) )
__UpperCamelCase = qkv_bias
def get_blipa_config(model_name , eos_token_id=None ):
    """simple docstring"""
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # flan-T5 checkpoints don't seem to have bos_token_id set properly, so it is set explicitly here
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """simple docstring"""
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print('Done!' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:' , original_logits[0, :3, :3] )
    print('First values of HF logits:' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print('Looks ok!' )
    print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt , return_tensors='pt' ).input_ids.to(device )
    original_outputs = original_model.generate({'image': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('Original generation:' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('HF generation:' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''' )
        hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 checkpoint to convert (one of the choices above).''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
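    # Example invocation (hypothetical script filename and output path; also
    # requires the original LAVIS package, which provides
    # load_model_and_preprocess):
    #
    #   python convert_blip_2_original_to_pytorch.py \
    #       --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted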
| 1 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("add_bos_token" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: "Conversation" ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 37 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    task_type: Optional[str] = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc.)"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast: bool = field(default=False , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    labels: Optional[str] = field(
        default=None , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , idalabel=label_map , labelaid={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": fa_score(out_label_list , preds_list ),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        results.update(result )
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
    return results
def _mp_fn(index ) -> None:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
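# Toy illustration of the alignment step in main() above (hypothetical label
# map {0: 'O', 1: 'B-LOC'}): positions whose label id equals the
# CrossEntropyLoss ignore_index (-100) are dropped before seqeval metrics run.
#
#   preds     = [[0, 1, 1]]        # argmax over logits, shape (1, 3)
#   label_ids = [[0, -100, 1]]     # -100 marks special/sub-word positions
#   align_predictions(...) -> preds_list     == [['O', 'B-LOC']]
#                             out_label_list == [['O', 'B-LOC']]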
| 1 | 0 |
'''simple docstring'''
def UpperCamelCase__ ( separator : str , separated : list[str] ) -> str:
    '''simple docstring'''
    joined : str = """"""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("""join() accepts only strings to be joined""" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
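# Usage sketch: the loop appends a trailing separator, which the final strip
# removes again.
#
#   UpperCamelCase__('-', ['a', 'b', 'c'])   # -> 'a-b-c'
#   UpperCamelCase__(' ', ['You', 'are'])    # -> 'You are'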
if __name__ == "__main__":
from doctest import testmod
testmod()
| 38 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you likely have network issues; you can debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs ) -> None:
    """solves the multi-process interleaved print problem"""
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
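# Why the lock: print() calls from many ranks can interleave mid-line. Taking
# an exclusive fcntl lock on this source file (any file every rank can open
# would work) makes each message print atomically. Usage sketch:
#
#   printflock(f"[{socket.gethostname()}-{os.environ['LOCAL_RANK']}] hello")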
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 1 | 0 |
from __future__ import annotations
def peak(lst: list[int] ) -> int:
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
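# Usage sketch (values checked by hand against the recursion above): for a
# list that strictly rises and then falls, the divide-and-conquer finds the
# peak in O(log n) comparisons.
#
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1])   # -> 5
#   peak([1, 10, 9, 8, 7, 6, 5, 4])     # -> 10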
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 39 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config , items ) -> None:
    """simple docstring"""
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config ) -> None:
    """simple docstring"""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory , monkeypatch ) -> None:
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output() -> None:
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch ) -> None:
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch ) -> None:
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
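# Sketch of how the hooks above play out in a hypothetical test module: a test
# with neither marker is collected as a unit test automatically.
#
#   import pytest
#
#   def test_default():              # gets pytest.mark.unit via
#       assert True                  # pytest_collection_modifyitems
#
#   @pytest.mark.integration
#   def test_hub_roundtrip():        # keeps its own marker
#       assert True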
| 1 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset: np.ndarray , value_array: np.ndarray ) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
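# Usage sketch (checked by hand against the distance logic above): one nearest
# neighbour per query row, by Euclidean distance.
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 1.0]])
#   similarity_search(dataset, value_array)   # -> [[[0.0, 0.0], 1.0]]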
| 40 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=32,attention_head_dim=4,)
        scheduler = DDIMScheduler(
            beta_start=0.00085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=False,set_alpha_to_one=False,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=128,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='gelu',projection_dim=512,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
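    # Note: the components above are deliberately tiny (32-dim hidden sizes, a
    # 1000-token vocab) so the whole pipeline graph runs on CPU in seconds; the
    # sizes are arbitrary test values, not a real checkpoint's shapes.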
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        video = floats_tensor((1, 3, 3, 32, 32),rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case( self ):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False,expected_max_diff=5e-3 )
    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def test_num_images_per_prompt( self ):
        '''simple docstring'''
        pass
    def test_progress_bar( self ):
        '''simple docstring'''
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase ):
    def test_two_step_model( self ):
        '''simple docstring'''
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576),generator=generator )
        video = video.to('cuda' )
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt,video=video,generator=generator,num_inference_steps=3,output_type='pt' ).frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 1 | 0 |