| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–54.1k) | int64 (0–699) | string (lengths 111–35.6k) | int64 (0–699) | int64 (0–1) |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 81 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into raw bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: "
            "Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC 3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: "
            "Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
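# A minimal round-trip sketch for the helpers above (the names base16_encode /
# base16_decode come from the cleanup in this rewrite, not the original row):
# >>> base16_encode(b"Hello World!")
# '48656C6C6F20576F726C6421'
# >>> base16_decode("48656C6C6F20576F726C6421")
# b'Hello World!'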
| 0 | 0 |
"""simple docstring"""
def decimal_to_binary(num):
    """Convert an integer to its binary string representation (e.g. 5 -> "0b101")."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
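# Usage sketch for decimal_to_binary (the name comes from this cleanup):
# >>> decimal_to_binary(0)
# '0b0'
# >>> decimal_to_binary(40)
# '0b101000'
# >>> decimal_to_binary(-32)
# '-0b100000'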
| 82 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 0 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 83 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the prime numbers in increasing order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(limit: int = 2_000_000) -> int:
    """Return the sum of all primes below the given limit (Project Euler 10)."""
    return sum(takewhile(lambda x: x < limit, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 0 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
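# Worked example (illustrative numbers): normalize_box rescales pixel
# coordinates to the 0-1000 range used by LayoutLM-style models. For a
# 200x100 image, the box (20, 30, 100, 60) maps to [100, 300, 500, 600],
# since e.g. int(1000 * 20 / 200) == 100 and int(1000 * 30 / 100) == 300.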
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Constructs a LayoutLMv2 image processor: resizes document images and optionally runs OCR on them."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
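# A minimal usage sketch (assumes Pillow and pytesseract are installed and that
# "document.png" exists; the file name is a placeholder):
#
#   from PIL import Image
#   processor = LayoutLMv2ImageProcessor()  # class name as restored above
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
#   print(encoding["pixel_values"].shape)        # (1, 3, 224, 224)
#   print(encoding["words"], encoding["boxes"])  # OCR output when apply_ocr=True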
| 84 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 0 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX, as the original uses Python control flow."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
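# A minimal sketch of how the config pieces above fit together (values are
# illustrative, not checkpoint defaults):
#
#   config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
#   # [["global", "local"], 2] expands to ["global", "local", "global", "local"],
#   # so len(config.attention_layers) == config.num_layers == 4 and no error is raised.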
| 85 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 0 | 0 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
 | 86 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
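# A minimal round-trip sketch for the reader/writer under test (the file name
# is illustrative):
#
#   from datasets import Dataset
#   from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ParquetDatasetWriter(ds, "tmp.parquet").write()
#   reloaded = ParquetDatasetReader("tmp.parquet").read()
#   assert reloaded.column_names == ["col_1", "col_2"]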
| 0 | 0 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
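# Worked example for n = 10: the sum of squares is 385 and the square of the
# sum is 55**2 = 3025, so solution(10) == 3025 - 385 == 2640.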
| 87 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of num must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of num must be summed
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
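# Usage sketch for the two helpers above (names from this cleanup):
# multiplicative_persistence(39) == 3, since 39 -> 27 -> 14 -> 4
# additive_persistence(199) == 3, since 199 -> 19 -> 10 -> 1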
| 0 | 0 |
"""simple docstring"""
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the order-th derivative of func at position, computed with dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 88 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa

set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]:
__magic_name__ :str = '''--skip_memory_metrics 0'''
__magic_name__ :Dict = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
__magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
__magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
__magic_name__ :Any = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig
__magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__magic_name__ :Optional[Any] = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
__magic_name__ :Tuple = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__magic_name__ :str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
__magic_name__ :Dict = '''
--do_predict
'''.split()
__magic_name__ :Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__magic_name__ :List[Any] = get_gpu_count()
__magic_name__ :Tuple = get_torch_dist_unique_port()
__magic_name__ :Union[str, Any] = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__magic_name__ :Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
else:
__magic_name__ :List[Any] = ['''run_translation.py'''] + args
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
main()
return output_dir
| 0 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = "bart"
SCREAMING_SNAKE_CASE : str = True
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> Dict:
if LOAD_DENSE_INDEX:
_lowercase : str = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
_lowercase : Any = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
_lowercase : str = qar_model.eval()
else:
_lowercase , _lowercase : str = (None, None)
if MODEL_TYPE == "bart":
_lowercase : Any = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
_lowercase : Tuple = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
_lowercase : Dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
_lowercase : int = sas_model.eval()
else:
_lowercase , _lowercase : int = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> Optional[Any]:
if LOAD_DENSE_INDEX:
_lowercase : List[Any] = faiss.StandardGpuResources()
_lowercase : List[Any] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
_lowercase : Dict = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
_lowercase : Any = faiss.IndexFlatIP(128 )
_lowercase : Optional[int] = faiss.index_cpu_to_gpu(lowerCamelCase_ , 1 , lowerCamelCase_ )
wikiaab_gpu_index_flat.add(lowerCamelCase_ ) # TODO fix for larger GPU
else:
_lowercase , _lowercase : Union[str, Any] = (None, None)
_lowercase : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCamelCase_ )
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Tuple = datasets.load_dataset('eli5' , name='LFQA_reddit' )
_lowercase : int = elia['train_eli5']
_lowercase : Any = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
_lowercase : Any = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(lowerCamelCase_ )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = load_indexes()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = load_models()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = load_train_data()
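# Dense nearest-neighbour lookup: embed the question with the retriever, then take the
# top-k inner-product matches from the ELI5 training-question faiss index.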
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_=10 ) -> List[Any]:
_lowercase : Any = embed_questions_for_retrieval([question] , lowerCamelCase_ , lowerCamelCase_ )
_lowercase , _lowercase : Any = eli5_train_q_index.search(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Any = [elia_train[int(lowerCamelCase_ )] for i in I[0]]
return nn_examples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ) -> Dict:
if source == "none":
_lowercase , _lowercase : List[Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowercase , _lowercase : Optional[Any] = query_qa_dense_index(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : List[str] = query_es_index(
lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , )
_lowercase : int = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowercase : Tuple = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ )
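    # question_doc becomes e.g. "question: <question> context: <P> passage 1 <P> passage 2 ..." (illustrative)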
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCamelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCamelCase_ : None),
} )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=64 , lowerCamelCase_=256 , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=0.95 , lowerCamelCase_=0.8 ) -> List[str]:
with torch.no_grad():
_lowercase : Dict = qa_sas_generate(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , num_answers=1 , num_beams=lowerCamelCase_ , min_len=lowerCamelCase_ , max_len=lowerCamelCase_ , do_sample=lowerCamelCase_ , temp=lowerCamelCase_ , top_p=lowerCamelCase_ , top_k=lowerCamelCase_ , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : List[Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : str = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Any = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : str = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : str = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : Optional[int] = "wiki40b"
SCREAMING_SNAKE_CASE : List[Any] = "dense"
SCREAMING_SNAKE_CASE : str = "beam"
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : int = 64
SCREAMING_SNAKE_CASE : List[Any] = 256
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : Tuple = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : str = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : Dict = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : List[Any] = None
# start main text
SCREAMING_SNAKE_CASE : Optional[int] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
SCREAMING_SNAKE_CASE : Optional[Any] = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE : int = st.text_input("Enter your question here:", "")
else:
SCREAMING_SNAKE_CASE : int = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method="dense", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = make_support(question, source=wiki_source, method="sparse", n_results=10)
SCREAMING_SNAKE_CASE : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE : int = support_list[:10]
SCREAMING_SNAKE_CASE : Dict = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : int = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : str = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : Optional[int] = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : Dict = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : Union[str, Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE : List[str] = find_nearest_training(question)
SCREAMING_SNAKE_CASE : Optional[int] = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
SCREAMING_SNAKE_CASE : Any = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : str = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 89 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
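# The constant above is the 1000-digit number from Project Euler problem 8.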
def __lowercase ( snake_case = N ):
"""simple docstring"""
__magic_name__ :Optional[int] = -sys.maxsize - 1
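    # slide a 13-digit window across the number, tracking the largest digit product seen so far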
for i in range(len(snake_case ) - 1_2 ):
__magic_name__ :List[Any] = 1
for j in range(1_3 ):
product *= int(n[i + j] )
if product > largest_product:
__magic_name__ :str = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 90 |
SCREAMING_SNAKE_CASE__ : Tuple = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def __lowercase ( snake_case ):
"""simple docstring"""
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
__magic_name__ :Dict = ''''''
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
__magic_name__ :int = word[5:]
decoded += " "
return decoded.strip()
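# Quick round-trip sketch using the tables above: encode("ab") == "AAAAAAAAAB",
# and decode(encode("ab")) == "ab".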
if __name__ == "__main__":
from doctest import testmod
testmod()
| 0 | 0 |
"""simple docstring"""
_lowercase = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
_lowercase = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def _snake_case ( snake_case__ : float , snake_case__ : str , snake_case__ : str ):
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
A = (
F'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
F'Valid values are: {", ".join(snake_case__ )}'
)
raise ValueError(snake_case__ )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
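# Worked example from the tables above: 100 km/h -> m/s is round(100 * 1.0 * 0.277777778, 3) == 27.778.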
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Tuple = emb.weight.shape
__magic_name__ :int = nn.Linear(snake_case, snake_case, bias=snake_case )
__magic_name__ :str = emb.weight.data
return lin_layer
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :int = torch.load(snake_case, map_location='''cpu''' )
__magic_name__ :Optional[Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__magic_name__ :List[Any] = mam_aaa['''model''']
remove_ignore_keys_(snake_case )
__magic_name__ :Tuple = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__magic_name__ :List[str] = MaMaaaConfig(
vocab_size=snake_case, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
__magic_name__ :int = state_dict['''decoder.embed_tokens.weight''']
__magic_name__ :List[str] = MaMaaaForConditionalGeneration(snake_case )
model.model.load_state_dict(snake_case, strict=snake_case )
__magic_name__ :List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
    SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 0 | 0 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : int ):
'''simple docstring'''
lowercase : List[str] =[2, 1, 2, -1]
lowercase : Tuple =[1, 2, 3, 4]
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =len(self.first_signal )
lowercase : Any =len(self.second_signal )
lowercase : Optional[Any] =max(UpperCAmelCase__ , UpperCAmelCase__ )
# create a zero matrix of max_length x max_length
lowercase : Any =[[0] * max_length for i in range(UpperCAmelCase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(UpperCAmelCase__ ):
lowercase : Any =deque(self.second_signal )
rotated_signal.rotate(UpperCAmelCase__ )
for j, item in enumerate(UpperCAmelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowercase : Optional[Any] =np.matmul(np.transpose(UpperCAmelCase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(UpperCAmelCase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
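# Independent numpy cross-check of the default signals above (not part of the class):
# np.real(np.fft.ifft(np.fft.fft([2, 1, 2, -1]) * np.fft.fft([1, 2, 3, 4]))) -> [10., 10., 6., 14.]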
| 92 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 0 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__A = datasets.load_iris()
__A = np.array(data["""data"""])
__A = np.array(data["""target"""])
__A = data["""target_names"""]
__A , __A , __A , __A = train_test_split(X, y)
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
return np.linalg.norm(np.array(_SCREAMING_SNAKE_CASE ) - np.array(_SCREAMING_SNAKE_CASE ) )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :str = zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# List of distances of all points from the point to be classified
lowerCAmelCase__ :Optional[Any] = []
for data_point in data:
lowerCAmelCase__ :Tuple = euclidean_distance(data_point[0] , _SCREAMING_SNAKE_CASE )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowerCAmelCase__ :str = [i[1] for i in sorted(_SCREAMING_SNAKE_CASE )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowerCAmelCase__ :Optional[int] = Counter(_SCREAMING_SNAKE_CASE ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
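# The query point [4.4, 3.1, 1.3, 1.4] sits squarely in the setosa cluster, so this is expected
# to print "setosa" for virtually any random train/test split.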
| 93 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''ChineseCLIPImageProcessor'''
a__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
__magic_name__ :Optional[Any] = kwargs.pop('''feature_extractor''' )
__magic_name__ :Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.image_processor
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__magic_name__ :Dict = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__magic_name__ :Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.tokenizer.model_input_names
__magic_name__ :Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
| 0 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self : str , UpperCAmelCase : str=None ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict ={}
if top_k is not None:
lowercase : Union[str, Any] =top_k
return {}, {}, postprocess_params
def __call__( self : str , UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : str , UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
lowercase : List[Any] =load_image(UpperCAmelCase )
lowercase : Optional[Any] =self.image_processor(images=UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase : str =self.model(**UpperCAmelCase )
return model_outputs
def A__ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=5 ) -> str:
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowercase : Any =self.model.config.num_labels
if self.framework == "pt":
lowercase : Dict =model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase : Dict =probs.topk(UpperCAmelCase )
elif self.framework == "tf":
lowercase : Tuple =stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase : Dict =tf.math.top_k(UpperCAmelCase , k=UpperCAmelCase )
lowercase , lowercase : Any =topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase : str =scores.tolist()
lowercase : Dict =ids.tolist()
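        # illustrative output shape (hypothetical values): [{"score": 0.92, "label": "tabby cat"}, ...]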
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase , UpperCAmelCase )]
| 94 |
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE__ : int = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase ) ),
}
| 0 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''efficientformer'''
def __init__( self : int , lowerCAmelCase_ : List[int] = [3, 2, 6, 4] , lowerCAmelCase_ : List[int] = [48, 96, 224, 448] , lowerCAmelCase_ : List[bool] = [True, True, True, True] , lowerCAmelCase_ : int = 448 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 7 , lowerCAmelCase_ : int = 5 , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 16 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : float = 1e-5 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 1e-12 , lowerCAmelCase_ : int = 224 , lowerCAmelCase_ : float = 1e-05 , **lowerCAmelCase_ : List[str] , ) -> None:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Any = hidden_sizes
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : Any = patch_size
UpperCAmelCase_ : Optional[int] = num_channels
UpperCAmelCase_ : Any = depths
UpperCAmelCase_ : Optional[Any] = mlp_expansion_ratio
UpperCAmelCase_ : Tuple = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Tuple = attention_ratio
UpperCAmelCase_ : Any = resolution
UpperCAmelCase_ : Any = pool_size
UpperCAmelCase_ : str = downsample_patch_size
UpperCAmelCase_ : Optional[Any] = downsample_stride
UpperCAmelCase_ : Tuple = downsample_pad
UpperCAmelCase_ : Optional[Any] = drop_path_rate
UpperCAmelCase_ : List[str] = num_metaad_blocks
UpperCAmelCase_ : Optional[Any] = distillation
UpperCAmelCase_ : Optional[Any] = use_layer_scale
UpperCAmelCase_ : Dict = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Optional[Any] = batch_norm_eps
| 95 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case ):
print(f'''{i}\t\t{d}''' )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = [float('''inf''' )] * vertex_count
__magic_name__ :Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__magic_name__ :Tuple = distance[u] + w
__magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
__lowerCamelCase = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ = "nezha"
def __init__( self : Optional[Any] , __snake_case : Union[str, Any]=2_1_1_2_8 , __snake_case : Optional[int]=7_6_8 , __snake_case : Dict=1_2 , __snake_case : Union[str, Any]=1_2 , __snake_case : Tuple=3_0_7_2 , __snake_case : List[str]="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Tuple=5_1_2 , __snake_case : str=6_4 , __snake_case : Optional[Any]=2 , __snake_case : List[Any]=0.02 , __snake_case : List[str]=1E-12 , __snake_case : List[Any]=0.1 , __snake_case : Optional[int]=0 , __snake_case : Any=2 , __snake_case : int=3 , __snake_case : Any=True , **__snake_case : Tuple , ) -> List[Any]:
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__: List[str] = vocab_size
__magic_name__: Optional[int] = hidden_size
__magic_name__: Optional[Any] = num_hidden_layers
__magic_name__: Union[str, Any] = num_attention_heads
__magic_name__: str = hidden_act
__magic_name__: Optional[int] = intermediate_size
__magic_name__: Dict = hidden_dropout_prob
__magic_name__: Dict = attention_probs_dropout_prob
__magic_name__: List[Any] = max_position_embeddings
__magic_name__: Any = max_relative_position
__magic_name__: Tuple = type_vocab_size
__magic_name__: Optional[Any] = initializer_range
__magic_name__: str = layer_norm_eps
__magic_name__: Tuple = classifier_dropout
__magic_name__: Tuple = use_cache
| 96 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Optional[int] = parent
__magic_name__ :List[Any] = 1_3
__magic_name__ :Union[str, Any] = 7
__magic_name__ :Optional[Any] = True
__magic_name__ :Tuple = True
__magic_name__ :List[str] = True
__magic_name__ :List[Any] = True
__magic_name__ :int = 9_9
__magic_name__ :Any = 3_2
__magic_name__ :Union[str, Any] = 2
__magic_name__ :List[str] = 4
__magic_name__ :List[Any] = 3_7
__magic_name__ :Tuple = '''gelu'''
__magic_name__ :Any = 0.1
__magic_name__ :str = 0.1
__magic_name__ :List[str] = 5_1_2
__magic_name__ :int = 1_6
__magic_name__ :Any = 2
__magic_name__ :List[Any] = 0.02
__magic_name__ :Optional[Any] = 3
__magic_name__ :Tuple = 4
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :str = None
if self.use_input_mask:
__magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :str = None
if self.use_token_type_ids:
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ :Union[str, Any] = None
__magic_name__ :Tuple = None
__magic_name__ :str = None
if self.use_labels:
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase )
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ :List[str] = [input_ids, input_mask]
__magic_name__ :Any = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = True
__magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__magic_name__ :Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self.num_labels
__magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.num_choices
__magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ :Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.num_labels
__magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__magic_name__ :List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :Union[str, Any] = config_and_inputs
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerModelTester(self )
__magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__magic_name__ :int = 5_0_0_0_0
__magic_name__ :Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.constant([[4, 1_0]] )
__magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
# 2,12,16,64
__magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
__magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
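        # RoPE: rotate query/key feature pairs by position-dependent sinusoidal phases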
__magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
| 0 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) in an index (integer) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a list of output tokens into a single string."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
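To see what the bpe() merge loop does, here is a toy trace with a made-up two-entry merge table; the ranks and the word below are illustrative only and are not taken from any real merges.txt (the relative imports above mean this guard never fires in package context, so this is a sketch, not live code).

if __name__ == "__main__":
    bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
    word = ("l", "o", "w</w>")
    while True:
        pairs = get_pairs(word)
        candidates = [p for p in pairs if p in bpe_ranks]
        if not candidates:
            break
        # merge the lowest-ranked (earliest learned) pair first
        first, second = min(candidates, key=bpe_ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    print(word)  # ('low</w>',)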
| 97 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
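For context, the _LazyModule used above defers the heavy submodule imports until attribute access. The stripped-down illustration below shows that idea only; it is not the transformers implementation.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called when normal lookup fails, i.e. on first access.
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value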
| 0 | 0 |
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
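The key-prefixing step inside evaluate() and predict() is easy to check in isolation; the metric names and values below are invented for the demonstration.

if __name__ == "__main__":
    metrics = {"exact_match": 81.2, "f1": 88.9, "eval_samples_per_second": 104.0}
    prefix = "eval"
    for key in list(metrics.keys()):
        if not key.startswith(f"{prefix}_"):
            # pop + reinsert under the prefixed name; already-prefixed keys are untouched
            metrics[f"{prefix}_{key}"] = metrics.pop(key)
    print(metrics)  # {'eval_samples_per_second': 104.0, 'eval_exact_match': 81.2, 'eval_f1': 88.9}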
| 98 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    # Note: the slicing structure below is from the original script; the target
    # key names were lost in extraction and are reconstructed assuming the HF
    # X-CLIP module layout (q/k/v projections per encoder layer).
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)

    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
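The repeated val[:dim], val[dim : dim * 2], val[-dim:] slicing in convert_state_dict exists because the original checkpoint fuses the q/k/v projections into a single in_proj tensor. A tiny demonstration with a fake tensor:

_dim = 4
_in_proj = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _in_proj[:_dim, :], _in_proj[_dim : _dim * 2, :], _in_proj[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)  # three equal-sized projection matrices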
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 0 |
def permute(nums):
    """Return all permutations of nums by recursing on rotations of the list."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return all permutations of nums via in-place swaps (backtracking)."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
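A quick cross-check of both implementations against itertools.permutations, added here as a sketch rather than part of the original script; both should yield the same n! permutations, possibly in a different order.

if __name__ == "__main__":
    from itertools import permutations

    reference = sorted(map(list, permutations([1, 2, 3])))
    assert sorted(permute([1, 2, 3])) == reference
    assert sorted(permute2([1, 2, 3])) == reference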
| 99 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around token sequences: stores ids and lengths, and cleans the corpus."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
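The padding in batch_sequences can be illustrated with a toy batch; the pad id of 0 below is an arbitrary choice for the demo, not the real special-token id.

if __name__ == "__main__":
    token_ids = [[5, 6, 7], [5, 6], [5]]
    pad_idx = 0
    max_len = max(len(t) for t in token_ids)
    padded = torch.tensor([t + [pad_idx] * (max_len - len(t)) for t in token_ids])
    print(padded)  # tensor([[5, 6, 7], [5, 6, 0], [5, 0, 0]])
    print(torch.tensor([len(t) for t in token_ids]))  # tensor([3, 2, 1])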
| 0 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
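Typical usage of this launcher, with an illustrative script name and flags that are not taken from this file:

# python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train
#
# The wrapper imports run_glue.py as a module, rewrites sys.argv so the script
# sees its own flags plus --tpu_num_cores, then xmp.spawn forks one process per core.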
| 100 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
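The convert_tokens_to_string loop flushes sub-token runs through SentencePiece whenever a special token appears. Below is a stub sketch of that control flow, with a fake decoder standing in for the real sp_model (the relative imports above mean this guard never fires in package context).

if __name__ == "__main__":
    class FakeSP:
        def decode(self, pieces):
            return "".join(p.replace("▁", " ") for p in pieces)

    sp, specials = FakeSP(), {"</s>"}
    tokens = ["▁Hello", "▁world", "</s>"]
    out, run = "", []
    for tok in tokens:
        if tok in specials:
            out += sp.decode(run) + tok  # flush the pending run, keep the special token verbatim
            run = []
        else:
            run.append(tok)
    out += sp.decode(run)
    print(out.strip())  # Hello world</s>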
| 0 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Constructs a CLAP processor wrapping a feature extractor and a Roberta tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
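Hypothetical wiring of the processor; the checkpoint id, waveform variable, and 48000 Hz sampling rate below are illustrative assumptions, not taken from this file.

# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48000, return_tensors="pt")
# -> tokenizer fields (input_ids, attention_mask) plus input_features from the audio branch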
| 101 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
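The greedy longest-match-first WordPiece behaviour asserted in test_wordpiece_tokenizer can be reproduced directly; the vocabulary below mirrors the one used in that test.

if __name__ == "__main__":
    vocab_tokens = ["[UNK]", "un", "##want", "##ed", "runn", "##ing"]
    wordpiece = WordpieceTokenizer(vocab={t: i for i, t in enumerate(vocab_tokens)}, unk_token="[UNK]")
    print(wordpiece.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']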
| 0 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = filter(lambda SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() )
UpperCamelCase : List[str] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__magic_name__ : Tuple = logging.getLogger(__name__)
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if metric == "rouge2":
UpperCamelCase : Tuple = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
UpperCamelCase : int = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
UpperCamelCase : Union[str, Any] = """{val_avg_em:.4f}-{step_count}"""
elif metric == "loss":
UpperCamelCase : Tuple = """{val_avg_loss:.4f}-{step_count}"""
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
""" function.""" )
UpperCamelCase : int = ModelCheckpoint(
dirpath=SCREAMING_SNAKE_CASE , filename=SCREAMING_SNAKE_CASE , monitor=f"""val_{metric}""" , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return EarlyStopping(
monitor=f"""val_{metric}""" , mode="""min""" if """loss""" in metric else """max""" , patience=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
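

# Illustrative wiring (a sketch, not part of the original file): how the
# callbacks above are typically handed to a pytorch_lightning Trainer.
# `output_dir` is a hypothetical placeholder; `pl` is assumed to be imported
# at the top of the module, as the class definition above already requires.
def build_example_trainer(output_dir, metric="rouge2", patience=3):
    return pl.Trainer(
        callbacks=[
            Seq2SeqLoggingCallback(),
            get_checkpoint_callback(output_dir, metric),
            get_early_stopping_callback(metric, patience),
        ],
    )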
| 102 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
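

# Illustrative flow (a sketch, not from the original file; every constructor
# argument shown is a placeholder):
#
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     quant_trainer_args=quant_trainer_args,
# )
# trainer.calibrate()            # run forward passes to collect activation ranges
# metrics = trainer.evaluate()   # quantization-aware evaluation
# trainer.save_onnx("./out")     # export the calibrated model to ONNX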
| 0 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
snake_case = logging.get_logger(__name__)
class UpperCAmelCase ( SequenceFeatureExtractor ):
A__ : Any = ['''input_features''', '''attention_mask''']
def __init__( self : List[Any] , __lowerCamelCase : str=8_0 , __lowerCamelCase : str=1_6_0_0_0 , __lowerCamelCase : List[str]=8_0 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=True , **__lowerCamelCase : List[str] , ):
"""simple docstring"""
super().__init__(feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase )
_snake_case = num_mel_bins
_snake_case = do_ceptral_normalize
_snake_case = normalize_means
_snake_case = normalize_vars
_snake_case = True
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : np.ndarray , ):
"""simple docstring"""
_snake_case = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
_snake_case = torch.from_numpy(__lowerCamelCase ).unsqueeze(0 )
_snake_case = ta_kaldi.fbank(__lowerCamelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : float = 0.0 , ):
"""simple docstring"""
# make sure we normalize float32 arrays
if normalize_means:
_snake_case = x[:input_length].mean(axis=0 )
_snake_case = np.subtract(__lowerCamelCase , __lowerCamelCase )
if normalize_vars:
_snake_case = x[:input_length].std(axis=0 )
_snake_case = np.divide(__lowerCamelCase , __lowerCamelCase )
if input_length < x.shape[0]:
_snake_case = padding_value
# make sure array is in float32
_snake_case = x.astype(np.float32 )
return x
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : Optional[np.ndarray] = None ):
"""simple docstring"""
_snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowerCamelCase , __lowerCamelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__lowerCamelCase , __lowerCamelCase )
]
def __call__( self : int , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Dict , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_snake_case = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
_snake_case = is_batched_numpy or (
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_snake_case = [np.asarray(__lowerCamelCase , dtype=np.float32 ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
_snake_case = np.asarray(__lowerCamelCase , dtype=np.float32 )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
_snake_case = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
_snake_case = [raw_speech]
# extract fbank features
_snake_case = [self._extract_fbank_features(__lowerCamelCase ) for waveform in raw_speech]
# convert into correct format for padding
_snake_case = BatchFeature({'''input_features''': features} )
_snake_case = self.pad(
__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
# make sure list is in array format
_snake_case = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __lowerCamelCase ):
_snake_case = [np.asarray(__lowerCamelCase , dtype=np.float32 ) for feature in input_features]
_snake_case = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
_snake_case = [np.asarray(__lowerCamelCase , dtype=np.int32 ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_snake_case = (
np.array(__lowerCamelCase , dtype=np.int32 )
if self._get_padding_strategies(__lowerCamelCase , max_length=__lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_snake_case = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__lowerCamelCase )
if return_tensors is not None:
_snake_case = padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
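

# Illustrative sketch (not part of the original file): the utterance-level
# CMVN performed above, written out standalone so its effect is easy to check.
# After normalization each mel bin is approximately zero-mean, unit-variance.
def _cmvn_demo():
    rng = np.random.default_rng(0)
    feats = (rng.standard_normal((100, 80)) * 3.0 + 5.0).astype(np.float32)
    normed = (feats - feats.mean(axis=0)) / feats.std(axis=0)
    assert np.allclose(normed.mean(axis=0), 0.0, atol=1e-3)
    assert np.allclose(normed.std(axis=0), 1.0, atol=1e-3)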
| 103 |
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check that the data has an even number of hex digits
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
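    # Quick sanity check (illustrative, not in the original): the two
    # functions are inverses of each other.
    assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"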
| 0 | 0 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _lowerCamelCase ( UpperCAmelCase_ : bool = True, *UpperCAmelCase_ : List[Any], **UpperCAmelCase_ : List[Any] ) -> Any:
"""simple docstring"""
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
A__ = False
if main_process_only:
A__ = PartialState().local_process_index == 0
return _tqdm(*UpperCAmelCase_, **UpperCAmelCase_, disable=UpperCAmelCase_ )
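

# Illustrative usage (a sketch, not from the original file): only the local
# main process renders the bar; other ranks get a disabled one. Note that
# `main_process_only` is the first positional argument, so the iterable must
# be passed after it.
def _tqdm_demo():
    for _ in tqdm(True, range(10)):
        pass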
| 104 |
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
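

# Illustrative note (not part of the original tests): `offline(...)` is a
# context manager that patches network access, so code that would otherwise
# reach the Hub fails fast inside the block, e.g.:
#
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     requests.get("https://huggingface.co")  # raises ConnectionError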
| 0 | 0 |
from typing import Any


def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
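    # Illustrative check (not in the original): 2 and 3 both appear twice in
    # the input, so both are modes and they come back sorted.
    assert mode([2, 3, 4, 2, 3]) == [2, 3]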
| 105 |
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
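    # Illustrative check (not in the original): the primes below 10 are
    # 2, 3, 5 and 7, and 2 + 3 + 5 + 7 = 17.
    assert solution(10) == 17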
| 0 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] =logging.get_logger(__name__)
__snake_case :List[Any] ={
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowerCAmelCase__ ( PretrainedConfig ):
A_ : Dict = 'nllb-moe'
A_ : Optional[int] = ['past_key_values']
A_ : Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , __UpperCamelCase : int=128_112 , __UpperCamelCase : str=1_024 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : str=4_096 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : List[str]=12 , __UpperCamelCase : Optional[Any]=4_096 , __UpperCamelCase : int=16 , __UpperCamelCase : str=0.0_5 , __UpperCamelCase : Optional[Any]=0.0_5 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Any="relu" , __UpperCamelCase : Any=1_024 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : int=0.0 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : str=2 , __UpperCamelCase : str=True , __UpperCamelCase : Dict=False , __UpperCamelCase : List[Any]="float32" , __UpperCamelCase : Tuple=False , __UpperCamelCase : Any=128 , __UpperCamelCase : Optional[int]=64 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=4 , __UpperCamelCase : int=0.0_0_1 , __UpperCamelCase : Dict=0.0_0_1 , __UpperCamelCase : Optional[Any]="all" , __UpperCamelCase : List[str]=False , __UpperCamelCase : Tuple=False , __UpperCamelCase : Any=1.0 , __UpperCamelCase : List[str]=0.2 , __UpperCamelCase : str=1 , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Dict=False , **__UpperCamelCase : Optional[Any] , ) -> Optional[int]:
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = router_z_loss_coef
A = router_aux_loss_coef
A = decoder_sparse_step
A = encoder_sparse_step
A = num_experts
A = expert_capacity
A = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
A = router_dtype
A = router_ignore_padding_tokens
A = batch_prioritized_routing
A = second_expert_policy
A = normalize_router_prob_before_dropping
A = moe_eval_capacity_token_fraction
A = moe_token_dropout
A = output_router_logits
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
| 106 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
| 0 | 0 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_UpperCAmelCase : Dict = logging.getLogger(__name__)
class lowercase_ ( PretrainedConfig ):
"""simple docstring"""
__lowerCAmelCase = "masked_bert"
def __init__( self : Dict, UpperCamelCase__ : Optional[int]=3_05_22, UpperCamelCase__ : Dict=7_68, UpperCamelCase__ : Tuple=12, UpperCamelCase__ : str=12, UpperCamelCase__ : List[str]=30_72, UpperCamelCase__ : Tuple="gelu", UpperCamelCase__ : Dict=0.1, UpperCamelCase__ : List[Any]=0.1, UpperCamelCase__ : Optional[Any]=5_12, UpperCamelCase__ : Optional[Any]=2, UpperCamelCase__ : Optional[Any]=0.02, UpperCamelCase__ : str=1e-12, UpperCamelCase__ : Dict=0, UpperCamelCase__ : Optional[Any]="topK", UpperCamelCase__ : str="constant", UpperCamelCase__ : Any=0.0, **UpperCamelCase__ : Dict, ) -> List[Any]:
super().__init__(pad_token_id=UpperCamelCase__, **UpperCamelCase__ )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = pruning_method
_A = mask_init
_A = mask_scale
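

# Illustrative usage (a sketch, not from the original file; assumes the
# constructor keywords of the upstream MaskedBertConfig that this class
# mirrors):
#
# config = lowercase_(pruning_method="topK", mask_init="constant", mask_scale=0.0)
# assert config.pruning_method == "topK"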
| 107 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( AbstractFileSystem ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
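

# Illustrative usage (a sketch, not from the original file; `repo_info` would
# be a `DatasetInfo` fetched from the Hub):
#
# fs = lowerCamelCase_(repo_info=repo_info, token=None)   # the class above
# print(fs.ls("", detail=False))   # list top-level files and directories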
| 0 | 0 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 108 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case=("train",) ):
"""simple docstring"""
assert isinstance(snake_case, snake_case )
for split in splits:
__magic_name__ :Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
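

# Illustrative round trip (a sketch, not part of the test module): the
# pattern the fixtures above exercise, written out directly.
#
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
# ParquetDatasetWriter(ds, "out.parquet").write()
# reloaded = ParquetDatasetReader("out.parquet", cache_dir="cache").read()
# assert reloaded.column_names == ["col_1", "col_2", "col_3"]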
| 0 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a = sys.version_info >= (3, 10)
def __magic_name__ ( __UpperCAmelCase=None , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__UpperCAmelCase )
@dataclass
class __a :
__UpperCamelCase : int
__UpperCamelCase : float
__UpperCamelCase : str
__UpperCamelCase : bool
@dataclass
class __a :
__UpperCamelCase : int = 42
__UpperCamelCase : str = field(default='toto', metadata={'help': 'help message'} )
@dataclass
class __a :
__UpperCamelCase : bool = False
__UpperCamelCase : bool = True
__UpperCamelCase : Optional[bool] = None
class __a ( Enum ):
__UpperCamelCase : Any = 'titi'
__UpperCamelCase : Union[str, Any] = 'toto'
class __a ( Enum ):
__UpperCamelCase : Optional[int] = 'titi'
__UpperCamelCase : int = 'toto'
__UpperCamelCase : Optional[Any] = 42
@dataclass
class __a :
__UpperCamelCase : BasicEnum = "toto"
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicEnum(self.foo )
@dataclass
class __a :
__UpperCamelCase : MixedTypeEnum = "toto"
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MixedTypeEnum(self.foo )
@dataclass
class __a :
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[float] = field(default=None, metadata={'help': 'help message'} )
__UpperCamelCase : Optional[str] = None
__UpperCamelCase : Optional[List[str]] = list_field(default=[] )
__UpperCamelCase : Optional[List[int]] = list_field(default=[] )
@dataclass
class __a :
__UpperCamelCase : List[int] = list_field(default=[] )
__UpperCamelCase : List[int] = list_field(default=[1, 2, 3] )
__UpperCamelCase : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
__UpperCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __a :
__UpperCamelCase : List[int] = field()
__UpperCamelCase : str = field()
__UpperCamelCase : BasicEnum = field()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BasicEnum(self.required_enum )
@dataclass
class __a :
__UpperCamelCase : int
__UpperCamelCase : "BasicEnum" = field()
__UpperCamelCase : "Optional[bool]" = None
__UpperCamelCase : "str" = field(default='toto', metadata={'help': 'help message'} )
__UpperCamelCase : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class __a :
__UpperCamelCase : bool = False
__UpperCamelCase : bool = True
__UpperCamelCase : bool | None = None
@dataclass
class __a :
__UpperCamelCase : int | None = None
__UpperCamelCase : float | None = field(default=None , metadata={'help': 'help message'} )
__UpperCamelCase : str | None = None
__UpperCamelCase : list[str] | None = list_field(default=[] )
__UpperCamelCase : list[int] | None = list_field(default=[] )
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : argparse.ArgumentParser ,lowerCamelCase : argparse.ArgumentParser ):
'''simple docstring'''
self.assertEqual(len(a._actions ) ,len(b._actions ) )
for x, y in zip(a._actions ,b._actions ):
__SCREAMING_SNAKE_CASE = {k: v for k, v in vars(lowerCamelCase ).items() if k != """container"""}
__SCREAMING_SNAKE_CASE = {k: v for k, v in vars(lowerCamelCase ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" ,lowerCamelCase ) and yy.get("""choices""" ,lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](lowerCamelCase ) ,yy["""type"""](lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument("""--foo""" ,type=lowerCamelCase ,required=lowerCamelCase )
expected.add_argument("""--bar""" ,type=lowerCamelCase ,required=lowerCamelCase )
expected.add_argument("""--baz""" ,type=lowerCamelCase ,required=lowerCamelCase )
expected.add_argument("""--flag""" ,type=lowerCamelCase ,default=lowerCamelCase ,const=lowerCamelCase ,nargs="""?""" )
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((__SCREAMING_SNAKE_CASE) , ) = parser.parse_args_into_dataclasses(lowerCamelCase ,look_for_args_file=lowerCamelCase )
self.assertFalse(example.flag )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument("""--foo""" ,default=42 ,type=lowerCamelCase )
expected.add_argument("""--baz""" ,default="""toto""" ,type=lowerCamelCase ,help="""help message""" )
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument("""--foo""" ,type=lowerCamelCase ,default=lowerCamelCase ,const=lowerCamelCase ,nargs="""?""" )
expected.add_argument("""--baz""" ,type=lowerCamelCase ,default=lowerCamelCase ,const=lowerCamelCase ,nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" ,action="""store_false""" ,default=lowerCamelCase ,dest="""baz""" )
expected.add_argument("""--opt""" ,type=lowerCamelCase ,default=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase )
for dataclass_type in dataclass_types:
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,baz=lowerCamelCase ,opt=lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" ,default="""toto""" ,choices=["""titi""", """toto""", 42] ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,)
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(args.foo ,"""toto""" )
__SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo ,"""titi""" )
__SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo ,42 )
__SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
@dataclass
class __a :
__UpperCamelCase : Literal["titi", "toto", 42] = "toto"
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" ,default="""toto""" ,choices=("""titi""", """toto""", 42) ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,)
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(args.foo ,"""toto""" )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo ,"""titi""" )
__SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo ,42 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase )
expected.add_argument("""--bar_int""" ,nargs="""+""" ,default=[1, 2, 3] ,type=lowerCamelCase )
expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=lowerCamelCase )
expected.add_argument("""--foo_float""" ,nargs="""+""" ,default=[0.1, 0.2, 0.3] ,type=lowerCamelCase )
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(
lowerCamelCase ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=["""Hallo""", """Bonjour""", """Hello"""] ,foo_float=[0.1, 0.2, 0.3] ) ,)
__SCREAMING_SNAKE_CASE = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowerCamelCase ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=["""a""", """b""", """c"""] ,foo_float=[0.1, 0.7] ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument("""--foo""" ,default=lowerCamelCase ,type=lowerCamelCase )
expected.add_argument("""--bar""" ,default=lowerCamelCase ,type=lowerCamelCase ,help="""help message""" )
expected.add_argument("""--baz""" ,default=lowerCamelCase ,type=lowerCamelCase )
expected.add_argument("""--ces""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase )
expected.add_argument("""--des""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase )
for dataclass_type in dataclass_types:
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(lowerCamelCase ,Namespace(foo=lowerCamelCase ,bar=lowerCamelCase ,baz=lowerCamelCase ,ces=[] ,des=[] ) )
__SCREAMING_SNAKE_CASE = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowerCamelCase ,Namespace(foo=12 ,bar=3.14 ,baz="""42""" ,ces=["""a""", """b""", """c"""] ,des=[1, 2, 3] ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument("""--required_list""" ,nargs="""+""" ,type=lowerCamelCase ,required=lowerCamelCase )
expected.add_argument("""--required_str""" ,type=lowerCamelCase ,required=lowerCamelCase )
expected.add_argument(
"""--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=lowerCamelCase ,)
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument("""--foo""" ,type=lowerCamelCase ,required=lowerCamelCase )
expected.add_argument(
"""--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=lowerCamelCase ,)
expected.add_argument("""--opt""" ,type=lowerCamelCase ,default=lowerCamelCase )
expected.add_argument("""--baz""" ,default="""toto""" ,type=lowerCamelCase ,help="""help message""" )
expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=lowerCamelCase )
self.argparsersEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
__SCREAMING_SNAKE_CASE = parser.parse_dict(lowerCamelCase )[0]
__SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowerCamelCase ,parser.parse_dict ,lowerCamelCase ,allow_extra_keys=lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,"""temp_json""" )
os.mkdir(lowerCamelCase )
with open(temp_local_path + """.json""" ,"""w+""" ) as f:
json.dump(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
__SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,"""temp_yaml""" )
os.mkdir(lowerCamelCase )
with open(temp_local_path + """.yaml""" ,"""w+""" ) as f:
yaml.dump(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
__SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
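

# Illustrative usage (a sketch, not part of the test module): HfArgumentParser
# turns a dataclass into an argparse parser, so CLI flags land directly on
# dataclass fields, e.g.:
#
# parser = HfArgumentParser(SomeDataclass)          # hypothetical dataclass
# (args,) = parser.parse_args_into_dataclasses(["--foo", "1", "--bar", "0.5"])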
| 109 |
def multiplicative_persistence(num):
    """Return how many times the digits of num must be multiplied together to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num):
    """Return how many times the digits of num must be summed to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
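# Illustrative check (not in the original module): 39 -> 3*9=27 -> 2*7=14 -> 1*4=4
# takes three multiplicative steps, and 199 -> 19 -> 10 -> 1 takes three additive steps.
assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3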
| 0 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest( lowercase , unittest.TestCase ):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self ):
UpperCAmelCase__ : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
UpperCAmelCase__ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def __snake_case ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase__ : Tuple = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : Tuple = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : Optional[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Tuple = tokenizer_r.from_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Tuple = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
UpperCAmelCase__ : str = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : str = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
UpperCAmelCase__ : int = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : str = tokenizer_r.from_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest( unittest.TestCase ):
    checkpoint_name = """facebook/mbart-large-en-ro"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
        cls.pad_token_id = 1
return cls
def __snake_case ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250_020 )
def __snake_case ( self ):
UpperCAmelCase__ : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def __snake_case ( self ):
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
UpperCAmelCase__ : List[Any] = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase__ : Union[str, Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
UpperCAmelCase__ : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCamelCase_ )
UpperCAmelCase__ : Tuple = 10
UpperCAmelCase__ : int = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def __snake_case ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250_026, 250_001] )
def __snake_case ( self ):
UpperCAmelCase__ : Dict = tempfile.mkdtemp()
UpperCAmelCase__ : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = MBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def __snake_case ( self ):
UpperCAmelCase__ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='pt' )
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __snake_case ( self ):
UpperCAmelCase__ : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
UpperCAmelCase__ : List[Any] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase__ : Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='pt' )
UpperCAmelCase__ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors='pt' )
UpperCAmelCase__ : int = targets['input_ids']
UpperCAmelCase__ : List[str] = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __snake_case ( self ):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3_034, 2, 250_004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
} , )
| 110 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = """sshleifer/student_marian_en_ro_6_1"""
MBART_TINY = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt( TestCasePlus ):
    def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
        last_step_stats = eval_metrics[-1]
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
        # via a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test 2nd time - was getting 'eval_loss': nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim ) -> Tuple[int, float]:
            extra_args_str = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args_str , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
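        # Back-of-the-envelope check (added note, not from the original test):
        # ~25e6 quantizable params * (8 - 2) bytes saved per param / 2**20 bytes
        # per MB ~= 143MB of expected savings, so asserting at least 120MB below
        # leaves roughly 20MB of headroom for per-GPU variation.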
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            loss_orig , loss_bnb , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer( self , max_len , model_name , num_train_epochs , learning_rate = 3E-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , ):
        """simple docstring"""
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        args_eval = F'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        '''.split()
        args_predict = '''
            --do_predict
        '''.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs ):
main()
return output_dir
| 0 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
    def setUp( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = '''UNwant\u00E9d,running'''
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
lowerCAmelCase = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
lowerCAmelCase = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
lowerCAmelCase = '''UNwant\u00E9d,running'''
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase )
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCAmelCase = {}
for i, token in enumerate(__lowerCAmelCase ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
lowerCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCAmelCase = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , 'do_lower_case' ) else False
lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = ['''的''', '''人''', '''有''']
lowerCAmelCase = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase = False
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowerCAmelCase = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
| 284 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n=N):
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
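# Cross-check sketch (added for illustration; reuses the module-level N above):
# the same maximum via math.prod over sliding 13-character windows.
import math
assert solution() == max(math.prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))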
| 0 | 0 |
class MaxFenwickTree:
    """Fenwick (binary indexed) tree that answers range-maximum queries over arr."""
    def __init__(self, size):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next(index):
        return index | (index + 1)
    @staticmethod
    def get_prev(index):
        return (index & (index + 1)) - 1
    def update(self, index, value):
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)
    def query(self, left, right):
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
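# Illustrative usage (added note; values are kept monotonically increasing, which the
# update logic reconstructed above handles directly):
tree = MaxFenwickTree(5)
tree.update(0, 10)
tree.update(1, 20)
assert tree.query(0, 5) == 20 and tree.query(0, 1) == 10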
| 278 |
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word):
    """Encode a word with the Baconian cipher."""
    encoded = ''''''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
    return encoded
def decode(coded):
    """Decode a Baconian-cipher string back to plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
    decoded = ''''''
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
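# Quick round-trip sketch (added note; uses the encode/decode names restored above):
# every letter maps to five A/B symbols, so "hello" becomes 25 symbols and back.
assert decode(encode("hello")) == "hello"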
| 0 | 0 |
def method_a(boundary, steps):
    """Approximate the integral of f over [boundary[0], boundary[1]] with the trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    """Yield the interior sample points between a and b with spacing h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(F'y = {y}' )
if __name__ == "__main__":
main()
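# Worked check (added note): for f(x) = x**2 on [0, 1] the exact integral is 1/3;
# with steps = 10.0 the trapezoidal estimate printed above is ~0.335, an error of
# ~0.0017 that shrinks as O(h**2). The generator's strict `x < b - h` bound makes
# the last interior point float-sensitive.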
| 230 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no counterpart in the transformers model."""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a linear LM head whose weights are tied to an embedding layer."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
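# Example invocation (illustrative; paths and the script filename are hypothetical):
#   python convert_m2m100_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100_dump
# The dumped folder can then be reloaded with MaMaaaForConditionalGeneration.from_pretrained.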
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
"""simple docstring"""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
"""simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _lowerCamelCase ( self ):
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def _lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFResNetModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img( ):
"""simple docstring"""
_lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 589 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 0 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern, text):
    """Return True if pattern occurs in text, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp():
    """Exercise rabin_karp() on a handful of match / no-match cases."""
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern, text)
    pattern = '''Lue'''
    assert not rabin_karp(pattern, text)
    print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
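# Worked example of the rolling hash (added note): with alphabet_size = 256 and
# modulus = 1000003, hashing "ab" gives (ord('a') * 256 + ord('b')) % 1000003;
# sliding one character to the right first removes ord('a') * modulus_power
# (256 here, since the pattern is two characters long), then shifts by 256 and
# adds the incoming character's code.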
| 315 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ChineseCLIPImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class
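# Minimal usage sketch (the argument values below are illustrative placeholders,
# not defined in this module):
# processor = ChineseCLIPProcessor(image_processor=my_image_processor, tokenizer=my_tokenizer)
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")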
| 0 | 0 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    '''simple docstring'''
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.' )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
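# A quick sanity check mirroring the asserts below: 561 = 3 * 11 * 17 is a
# Carmichael number and is correctly rejected, while its neighbour 563 is prime.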
def test_miller_rabin() -> None:
    '''simple docstring'''
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 209 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int32'''),
                    '''references''': datasets.Value('''int32'''),
                }), reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
            ], )

    def _compute(self, predictions, references, sample_weight=None):
        """simple docstring"""
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
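# Sketch of the equivalent direct sklearn call (mirrors Example 1 in the
# docstring above; not a new API, just the function this metric wraps):
#     matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3])  # ~= 0.54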
| 0 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
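# e.g. format_time(3661) -> '1:01:01' and format_time(75) -> '01:15'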
def html_progress_bar(value, total, prefix, label, width=300):
    return f'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = '''<table border="1" class="dataframe">\n'''
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f'''    <th>{i}</th>\n'''
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt, float) else str(elt)
            html_code += f'''      <td>{elt}</td>\n'''
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
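# `items` is a list of rows whose first row is the header, e.g.
# [["Step", "Training Loss"], [500, 1.2345]] renders a two-column table.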
class NotebookProgressBar:
    '''simple docstring'''

    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix = None , leave = True , parent = None , width = 300 , ):
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value , force_update = False , comment = None ):
A = value
if comment is not None:
A = comment
if self.last_value is None:
A = time.time()
A = value
A = None
A = self.warmup
A = 1
self.update_bar(__lowerCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
A = time.time()
A = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
A = self.elapsed_time / (value - self.start_value)
else:
A = None
if value >= self.total:
A = self.total
A = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
A = self.average_time_per_item * (self.total - value)
self.update_bar(__lowerCAmelCase )
A = value
A = current_time
if self.average_time_per_item is None:
A = 1
else:
A = max(int(self.update_every / self.average_time_per_item ) , 1 )
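    # Note: `wait_for` above throttles redraws; after the first `warmup` calls the
    # bar only repaints about once every `update_every` (0.2 s) of processed items.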
    def update_bar( self , value , comment = None ):
A = ''' ''' * (len(str(self.total ) ) - len(str(__lowerCAmelCase ) )) + str(__lowerCAmelCase )
if self.elapsed_time is None:
A = f'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
A = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
A = (
f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
f''' {format_time(self.predicted_remaining )}'''
)
self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
self.display()
    def display( self ):
A = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
A = disp.display(disp.HTML(self.html_code ) , display_id=__lowerCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
    def close( self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class NotebookTrainingTracker(NotebookProgressBar):
    '''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=None ):
super().__init__(__lowerCAmelCase )
A = None if column_names is None else [column_names]
A = None
    def display( self ):
A = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
A = disp.display(disp.HTML(self.html_code ) , display_id=__lowerCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ):
if self.inner_table is None:
A = [list(values.keys() ), list(values.values() )]
else:
A = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__lowerCAmelCase )
A = columns
self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix = None , width = 300 ):
A = NotebookProgressBar(__lowerCAmelCase , prefix=__lowerCAmelCase , parent=self , width=__lowerCAmelCase )
return self.child_bar
    def remove_child( self ):
A = None
self.display()
class NotebookProgressCallback(TrainerCallback):
    '''simple docstring'''
def __init__( self : Optional[Any] ):
A = None
A = None
A = False
    def on_train_begin( self , args , state , control , **kwargs ):
A = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
A = 0
A = 0
A = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
A = NotebookTrainingTracker(state.max_steps , __lowerCAmelCase )
    def on_step_end( self , args , state , control , **kwargs ):
A = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
A = False
    def on_prediction_step( self , args , state , control , eval_dataloader = None , **kwargs ):
if not has_length(__lowerCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
A = self.training_tracker.add_child(len(__lowerCAmelCase ) )
else:
A = NotebookProgressBar(len(__lowerCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ):
if self.prediction_bar is not None:
self.prediction_bar.close()
A = None
    def on_log( self , args , state , control , logs = None , **kwargs ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
A = {'''Training Loss''': logs['''loss''']}
        # First column is necessarily Step since we're not in epoch eval strategy
A = state.global_step
self.training_tracker.write_line(__lowerCAmelCase )
    def on_evaluate( self , args , state , control , metrics = None , **kwargs ):
if self.training_tracker is not None:
A = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
A = log['''loss''']
break
if self.first_column == "Epoch":
A = int(state.epoch )
else:
A = state.global_step
A = '''eval'''
for k in metrics:
if k.endswith('_loss' ):
A = re.sub(R'\_loss$' , '' , __lowerCAmelCase )
A = metrics.pop('total_flos' , __lowerCAmelCase )
A = metrics.pop('epoch' , __lowerCAmelCase )
A = metrics.pop(f'''{metric_key_prefix}_runtime''' , __lowerCAmelCase )
A = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , __lowerCAmelCase )
A = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , __lowerCAmelCase )
A = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , __lowerCAmelCase )
for k, v in metrics.items():
if k == f'''{metric_key_prefix}_loss''':
A = v
else:
A = k.split('_' )
A = ''' '''.join([part.capitalize() for part in splits[1:]] )
A = v
self.training_tracker.write_line(__lowerCAmelCase )
self.training_tracker.remove_child()
A = None
# Evaluation takes a long time so we should force the next update.
A = True
    def on_train_end( self , args , state , control , **kwargs ):
self.training_tracker.update(
state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__lowerCAmelCase )
A = None
| 699 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    """simple docstring"""
    print(f'''Vertex\tShortest Distance from vertex {src}''')
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''')
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    """simple docstring"""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """simple docstring"""
    distance = [float('''inf''')] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''')
    return distance
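# Bellman-Ford relaxes every edge (vertex_count - 1) times, so it runs in
# O(V * E); the extra relaxation pass in check_negative_cycle detects
# negative cycles, which make shortest distances undefined.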
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("""Enter number of vertices: """).strip())
    E = int(input("""Enter number of edges: """).strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("""Edge """, i + 1)
        src, dest, weight = (
            int(x)
            for x in input("""Enter source, destination, weight: """).strip().split(""" """)
        )
        graph[i] = {"""src""": src, """dst""": dest, """weight""": weight}

    source = int(input("""\nEnter shortest path source:""").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 0 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = '''backbone.''' if is_semantic else ''''''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(f"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(f"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(f"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
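# read_in_q_k_v below splits each fused `attn.qkv.weight` into separate
# query/key/value projections of `hidden_size` rows each; BEiT stores no key
# bias, so only q_bias and v_bias are popped from the checkpoint.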
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = '''backbone.''' if is_semantic else ''''''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )

        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if '''rvlcdip''' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__snake_case = 1024
__snake_case = 4096
__snake_case = 24
__snake_case = 16
# labels
if "rvlcdip" in checkpoint_url:
__snake_case = 16
__snake_case = '''huggingface/label-files'''
__snake_case = '''rvlcdip-id2label.json'''
__snake_case = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
__snake_case = {int(snake_case_ ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__snake_case = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' )['''model''']
__snake_case = create_rename_keys(snake_case_ , has_lm_head=snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
read_in_q_k_v(snake_case_ , snake_case_ , has_lm_head=snake_case_ )
# load HuggingFace model
__snake_case = BeitForMaskedImageModeling(snake_case_ ) if has_lm_head else BeitForImageClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# Check outputs on an image
__snake_case = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=snake_case_ , return_tensors='''pt''' )
__snake_case = encoding['''pixel_values''']
__snake_case = model(snake_case_ )
__snake_case = outputs.logits
# verify logits
    expected_shape = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
if has_lm_head:
__snake_case = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
__snake_case = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case_ , snake_case_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=snake_case_ , )
model.push_to_hub(
repo_path_or_name=Path(snake_case_ , snake_case_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=snake_case_ , )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
snake_case_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 592 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Optional[int] = parent
__magic_name__ :List[Any] = 1_3
__magic_name__ :Union[str, Any] = 7
__magic_name__ :Optional[Any] = True
__magic_name__ :Tuple = True
__magic_name__ :List[str] = True
__magic_name__ :List[Any] = True
__magic_name__ :int = 9_9
__magic_name__ :Any = 3_2
__magic_name__ :Union[str, Any] = 2
__magic_name__ :List[str] = 4
__magic_name__ :List[Any] = 3_7
__magic_name__ :Tuple = '''gelu'''
__magic_name__ :Any = 0.1
__magic_name__ :str = 0.1
__magic_name__ :List[str] = 5_1_2
__magic_name__ :int = 1_6
__magic_name__ :Any = 2
__magic_name__ :List[Any] = 0.02
__magic_name__ :Optional[Any] = 3
__magic_name__ :Tuple = 4
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :str = None
if self.use_input_mask:
__magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :str = None
if self.use_token_type_ids:
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ :Union[str, Any] = None
__magic_name__ :Tuple = None
__magic_name__ :str = None
if self.use_labels:
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase )
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ :List[str] = [input_ids, input_mask]
__magic_name__ :Any = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = True
__magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__magic_name__ :Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self.num_labels
__magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.num_choices
__magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ :Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.num_labels
__magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__magic_name__ :List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) :Union[str, Any] = config_and_inputs
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerModelTester(self )
__magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__magic_name__ :int = 5_0_0_0_0
__magic_name__ :Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.constant([[4, 1_0]] )
__magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
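        # The expected tensor above encodes the sinusoidal layout used here: the
        # first half of each row holds sin(pos / 10000**(2i/d)) terms, the second
        # half the matching cos terms (row 0: sin(0)=0 thrice, cos(0)=1 thrice;
        # row 1 starts with sin(1)=0.8415 and ends near cos(0)=1.0).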
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def A ( self ):
"""simple docstring"""
# 2,12,16,64
__magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
__magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
__magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
| 0 | 0 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps ,max_beta=0.999 ,alpha_transform_type="cosine" ,):
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008 ) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) ,max_beta ) )
    return torch.tensor(betas ,dtype=torch.float32 )
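# Each beta is clipped so the cumulative product of alphas stays monotone:
#   beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)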
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.0001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , clip_sample = True , set_alpha_to_zero = True , steps_offset = 0 , prediction_type = "epsilon" , clip_sample_range = 1.0 , **kwargs , ):
'''simple docstring'''
if kwargs.get("""set_alpha_to_one""" , __lowerCAmelCase ) is not None:
_lowercase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate("""set_alpha_to_one""" , """1.0.0""" , __lowerCAmelCase , standard_warn=__lowerCAmelCase )
_lowercase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_lowercase = torch.tensor(__lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase = torch.linspace(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase = betas_for_alpha_bar(__lowerCAmelCase )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_lowercase = 1.0 - self.betas
_lowercase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowercase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowercase = 1.0
# setable values
_lowercase = None
_lowercase = torch.from_numpy(np.arange(0 , __lowerCAmelCase ).copy().astype(np.intaa ) )
    def scale_model_input( self , sample , timestep = None ):
        '''simple docstring'''
        return sample
    def set_timesteps( self , num_inference_steps , device = None ):
        '''simple docstring'''
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                F''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
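        # e.g. num_train_timesteps=1000 with num_inference_steps=50 gives
        # step_ratio 20 and timesteps [0, 20, ..., 980] (plus steps_offset).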
    def step( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = False , variance_noise = None , return_dict = True , ):
        '''simple docstring'''
_lowercase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowercase = self.alphas_cumprod[timestep]
_lowercase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowercase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowercase = model_output
elif self.config.prediction_type == "sample":
_lowercase = model_output
_lowercase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowercase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowercase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
""" `v_prediction`""" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowercase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
| 398 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 0 |
def palindromic_string(input_string: str) -> str:
    """
    >>> palindromic_string('abbbaba')
    'abbba'

    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''''''
    output_string = ''''''

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # is this palindrome ending after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """simple docstring"""
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find('''patch''' )
    patch_size = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames )

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config )

    if "large" in model_name:
        config.projection_dim = 768

    return config
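# e.g. model_name "xclip-base-patch32-16-frames" yields patch_size 32 (the two
# characters right after "patch" in the name), while num_frames is supplied by
# the caller.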
def rename_key(name):
    """simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "attn.in_proj" in key:
            key_split = key.split('''.''' )
            if key.startswith('''visual''' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
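
# The .npy file holds the clip as one stacked array; returning list(video)
# hands the processor one array per frame (assumed here to be H x W x 3
# uint8 images, the layout video processors expect).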
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed before a
    single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
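
    # Hand-checkable examples: 39 -> 27 -> 14 -> 4 takes three multiplicative
    # steps, and 9876 -> 30 -> 3 takes two additive steps.
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(9876) == 2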
| 681 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around the token-id sequences used for distillation training.

    Input:
    ------
        params: namespace-style parameters
        data: list of np.array[int] token-id sequences
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
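
# Typical usage sketch (hypothetical `params` namespace exposing the fields
# read above -- max_model_input_size, mlm, special_tok_ids, is_master):
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)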
| 0 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and the duration (in minutes) of one job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
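
# Note on pagination: the GitHub REST API caps per_page at 100, so after the
# first call the loop above walks pages 2..N, with N derived from the
# reported total_count.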
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 284 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 0 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest
    Collatz chain, memoizing chain lengths along the way."""
    pre_counter = 1
    largest_number = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
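
# Known result for the classic Project Euler #14 bound:
# solution(1_000_000) == 837799.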
if __name__ == "__main__":
print(solution(int(input().strip())))
| 278 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_common_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 0 | 0 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: scale the molar concentration by the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal-gas pressure: P = nRT / V, with R ~= 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal-gas volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal-gas temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
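
# Worked example (values assumed for illustration): one mole of an ideal gas
# at 1 atm in 22.4 L sits near standard temperature, so
# pressure_and_volume_to_temperature(pressure=1, moles=1, volume=22.4)
# returns round(22.4 / 0.0821) == 273.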
if __name__ == "__main__":
import doctest
doctest.testmod()
| 230 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
def A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = "eval" ):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase = "test" ):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
| 0 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
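
# Quick sanity sketch: the defaults above describe the base architecture, so
# a bare construction should be internally consistent, e.g.
#   config = UniSpeechSatConfig()
#   assert config.num_feat_extract_layers == len(config.conv_dim) == 7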
| 589 |
def base16_encode(data: bytes) -> str:
    """Encode `data` as an uppercase base16 (hex) string."""
    # Turn each byte into its two-digit uppercase hex form and join them.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
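
# Example round trip: base16_encode(b"Hello") == "48656C6C6F" and
# base16_decode("48656C6C6F") == b"Hello".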
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_A : List[Any] = """
import os
"""
_A : Any = """
def foo():
import os
return False
"""
_A : int = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
_A : Tuple = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
_A : Optional[Any] = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
_A : Optional[Any] = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
_A : Optional[int] = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
_A : Tuple = """
import os
try:
import bar
except:
raise ValueError()
"""
_A : int = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
_A : Dict = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 315 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
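
# The three simulated modes mirror the ways "offline" shows up in practice:
# a request that would hang until timeout, a socket-level connection failure,
# and the HF_DATASETS_OFFLINE=1 switch that short-circuits outgoing requests.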
| 0 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 209 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 2_000_000) -> int:
    """Returns the sum of all the primes below `nth`."""
    return sum(takewhile(lambda x: x < nth, prime_generator()))
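
# Known value (Project Euler #10): the sum of all primes below two million
# is 142913828922, which is what solution() returns with the default bound.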
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCamelCase (lowerCAmelCase : List[Any] ) -> Optional[int]:
A = []
A = []
A = []
for rt in rc.restypes:
A = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
A = {name: i for i, name in enumerate(lowerCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
A = torch.tensor(
lowerCAmelCase, dtype=torch.intaa, device=protein['aatype'].device, )
A = torch.tensor(
lowerCAmelCase, dtype=torch.intaa, device=protein['aatype'].device, )
A = torch.tensor(
lowerCAmelCase, dtype=torch.floataa, device=protein['aatype'].device, )
A = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
A = restype_atomaa_to_atomaa[protein_aatype]
A = restype_atomaa_mask[protein_aatype]
A = residx_atomaa_mask
A = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
A = restype_atomaa_to_atomaa[protein_aatype]
A = residx_atomaa_to_atomaa.long()
# create the corresponding mask
A = torch.zeros([21, 37], dtype=torch.floataa, device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
A = rc.restype_atoa[restype_letter]
A = rc.residue_atoms[restype_name]
for atom_name in atom_names:
A = rc.atom_order[atom_name]
A = 1
A = restype_atomaa_mask[protein_aatype]
A = residx_atomaa_mask
return protein
def __UpperCamelCase (lowerCAmelCase : Union[str, Any] ) -> List[Any]:
A = tree_map(lambda lowerCAmelCase : torch.tensor(lowerCAmelCase, device=batch['aatype'].device ), lowerCAmelCase, np.ndarray )
A = tensor_tree_map(lambda lowerCAmelCase : np.array(lowerCAmelCase ), make_atomaa_masks(lowerCAmelCase ) )
return out
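# Editor's sketch of the core pattern above: indexing a per-residue-type lookup
# table with per-residue type ids gathers one index row per residue. The names
# below are illustrative only, not part of the library.
_lookup = torch.tensor([[0, 1, 2], [2, 1, 0]])  # hypothetical per-type index tables
_aatype = torch.tensor([0, 1, 0])  # hypothetical per-residue type ids
assert _lookup[_aatype].shape == (3, 3)  # one gathered index row per residue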
| 699 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
__magic_name__ :List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :int = np.random.randn(3 , 4 , 5 )
__magic_name__ :Union[str, Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(3 , 4 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
__magic_name__ :Dict = np.random.randn(3 , 4 , 5 )
__magic_name__ :Dict = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.reshape(__lowerCAmelCase , (1_2, 5) ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Tuple = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , reshape(__lowerCAmelCase , (1_2, 5) ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Any = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
__magic_name__ :List[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ :List[str] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (1_2, 5) ) , np.asarray(reshape(__lowerCAmelCase , (1_2, 5) ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
__magic_name__ :Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = np.random.randn(1 , 3 , 4 )
__magic_name__ :List[Any] = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :List[str] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :str = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :int = np.random.randn(1 , 3 , 4 )
__magic_name__ :Tuple = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
__magic_name__ :Tuple = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = np.random.randn(1 , 3 , 4 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
__magic_name__ :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ :Optional[Any] = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = np.random.randn(3 , 4 )
__magic_name__ :Any = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ :Union[str, Any] = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = np.random.randn(3 , 4 )
__magic_name__ :Tuple = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
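# Editor's sketch of the dispatch pattern these tests exercise: a single helper
# accepts numpy/torch/tf/jax inputs and forwards to the matching backend. A
# minimal, hedged version covering only the numpy branch:
def _transpose_like(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    raise TypeError(f"unsupported array type: {type(array)}")  # other backends omitted
assert _transpose_like(np.zeros((3, 4))).shape == (4, 3)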
| 0 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Returns the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
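# Editor's note (checked by hand): with the reconstruction above,
# zeller("01-31-2010") returns "Your date 01-31-2010, is a Sunday!", and the
# datetime cross-check inside the function confirms that weekday.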
| 592 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info=None, token=None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        return sorted(f["name"] for f in out)
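# Editor's sketch of the parent-expansion trick _get_dirs relies on: every
# ancestor of a sibling path (except the final ".") becomes a directory entry.
_parents = list(PurePosixPath("data/train/shard0.txt").parents)[:-1]
assert [str(p) for p in _parents] == ["data/train", "data"]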
| 0 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix, without pivoting."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
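# Editor's sketch (assuming the reconstructed lower_upper_decomposition above):
# the two factors must reproduce the input, i.e. lower @ upper == table.
_m = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
_low, _up = lower_upper_decomposition(_m)
assert np.allclose(_low @ _up, _m)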
| 398 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Tuple = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = tmp_path / '''cache'''
__magic_name__ :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Tuple = features.copy() if features else default_expected_features
__magic_name__ :Union[str, Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :int = ParquetDatasetReader(snake_case, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = tmp_path / '''cache'''
__magic_name__ :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = ParquetDatasetReader(snake_case, cache_dir=snake_case, split=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = parquet_path
elif issubclass(snake_case, snake_case ):
__magic_name__ :Union[str, Any] = [parquet_path]
__magic_name__ :Optional[int] = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :str = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_dataset(snake_case, snake_case )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Any = tmp_path / '''cache'''
__magic_name__ :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ :Tuple = ParquetDatasetReader(
{'''train''': parquet_path}, cache_dir=snake_case, keep_in_memory=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = tmp_path / '''cache'''
__magic_name__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :int = features.copy() if features else default_expected_features
__magic_name__ :List[Any] = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ :Optional[int] = ParquetDatasetReader({'''train''': parquet_path}, features=snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
if split:
__magic_name__ :Dict = {split: parquet_path}
else:
__magic_name__ :Optional[int] = '''train'''
__magic_name__ :Dict = {'''train''': parquet_path, '''test''': parquet_path}
__magic_name__ :List[Any] = tmp_path / '''cache'''
__magic_name__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ :Optional[Any] = ParquetDatasetReader(snake_case, cache_dir=snake_case ).read()
_check_parquet_datasetdict(snake_case, snake_case, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' )
__magic_name__ :List[Any] = pf.read()
assert dataset.data.table == output_table
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = str(shared_datadir / '''test_image_rgb.jpg''' )
__magic_name__ :Tuple = {'''image''': [image_path]}
__magic_name__ :List[Any] = Features({'''image''': Image()} )
__magic_name__ :Tuple = Dataset.from_dict(snake_case, features=snake_case )
__magic_name__ :Union[str, Any] = ParquetDatasetWriter(snake_case, tmp_path / '''foo.parquet''' )
assert writer.write() > 0
__magic_name__ :List[str] = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
__magic_name__ :List[str] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ), streaming=snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
assert get_writer_batch_size(snake_case ) == expected
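# Editor's sketch of the round trip the tests above exercise, using only public
# `datasets` API (hedged; run in a scratch directory, file name is illustrative):
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
# ds.to_parquet("demo.parquet")
# assert Dataset.from_parquet("demo.parquet").column_names == ["col_1", "col_2"]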
| 0 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_implemented():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
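# Editor's note: the suite above is differential testing: replay one operation
# trace against HashMap and a plain dict, then compare every observable piece
# of state. A hedged micro-example replaying a trace against two plain dicts:
_left, _right = {}, {}
for _fun, *_args in _add_items:
    _run_operation(_left, _fun, *_args)
    _run_operation(_right, _fun, *_args)
assert _left == _right == {"key_a": "val_a", "key_b": "val_b"}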
| 197 |
def multiplicative_persistence(num: int) -> int:
    """Counts the steps of repeated digit multiplication until one digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Counts the steps of repeated digit summation until one digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
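# Editor's sketch: 39 -> 27 -> 14 -> 4 takes three multiplicative steps, and
# 199 -> 19 -> 10 -> 1 takes three additive steps.
assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3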
| 0 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A__ :
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Any = None
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Any = None
_UpperCAmelCase : Tuple = None
def UpperCamelCase__ ( self ):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
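    # Editor's note (hedged): the method above clones the config by deep-copying
    # every attribute into a fresh instance, equivalent in spirit to
    # copy.deepcopy(self) but rebuilt through the constructor.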
| 681 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class lowerCamelCase_ ( lowerCamelCase ):
def A ( self , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , ):
"""simple docstring"""
__magic_name__ :List[Any] = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , )
__magic_name__ :Any = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__magic_name__ :Any = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :str = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__magic_name__ :Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=__lowerCAmelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCAmelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=__lowerCAmelCase )
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
__magic_name__ :Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
__magic_name__ :Optional[Any] = experiments[experiment_id]
__magic_name__ :List[Any] = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
__magic_name__ :Optional[int] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data['''extra_args_str'''] )
__magic_name__ :int = len(re.findall(__lowerCAmelCase , cl.err ) )
self.assertEqual(__lowerCAmelCase , data['''n_matches'''] )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__lowerCAmelCase , )
# Check metrics
__magic_name__ :Optional[int] = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :List[str] = [log for log in logs if '''eval_loss''' in log.keys()]
__magic_name__ :Any = eval_metrics[0]
__magic_name__ :int = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , __lowerCAmelCase )
# test if do_predict saves generations and metrics
__magic_name__ :List[Any] = os.listdir(__lowerCAmelCase )
__magic_name__ :List[str] = {os.path.basename(__lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__lowerCAmelCase ) -> Tuple[int, float]:
__magic_name__ :str = '''--skip_memory_metrics 0'''
__magic_name__ :Dict = self.run_trainer(
max_len=1_2_8 , model_name=__lowerCAmelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
__magic_name__ :Optional[Any] = TrainerState.load_from_json(Path(__lowerCAmelCase , '''trainer_state.json''' ) ).log_history
__magic_name__ :int = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0 )
__magic_name__ :Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0 )
__magic_name__ :Any = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
__magic_name__ , __magic_name__ , __magic_name__ :int = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
__magic_name__ :Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
__magic_name__ :Tuple = gpu_peak_mem_orig + gpu_alloc_mem_orig
__magic_name__ :List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
__magic_name__ :Optional[int] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
__magic_name__ :Optional[Any] = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__lowerCAmelCase , __lowerCAmelCase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__lowerCAmelCase , __lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3E-3 , __lowerCAmelCase = "adafactor" , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , ):
"""simple docstring"""
__magic_name__ :int = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
__magic_name__ :Dict = self.get_auto_remove_tmp_dir()
__magic_name__ :Tuple = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
__magic_name__ :str = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
__magic_name__ :Dict = '''
--do_predict
'''.split()
__magic_name__ :Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
__magic_name__ :List[Any] = get_gpu_count()
__magic_name__ :Tuple = get_torch_dist_unique_port()
__magic_name__ :Union[str, Any] = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
__magic_name__ :Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
else:
__magic_name__ :List[Any] = ['''run_translation.py'''] + args
with patch.object(__lowerCAmelCase , '''argv''' , __lowerCAmelCase ):
main()
return output_dir
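# Editor's sketch of the launch pattern used above: a script's main() is driven
# either in-process by patching sys.argv, or out-of-process via
# torch.distributed.run. In-process variant (hedged, arguments illustrative):
# with patch.object(sys, "argv", ["run_translation.py", "--output_dir", "/tmp/out"]):
#     main()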
| 0 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def snake_case ( snake_case : Union[str, Any] , snake_case : Tuple=False ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def snake_case ( snake_case : str , snake_case : Optional[Any] , snake_case : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase = ''''''
else:
lowerCAmelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
lowerCAmelCase = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase = in_proj_bias[: config.hidden_size]
lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def snake_case ( snake_case : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def snake_case ( snake_case : Optional[int] ) -> Any:
"""simple docstring"""
lowerCAmelCase = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def rename_key(dct, old, new):
    """Moves the tensor stored under key `old` to key `new`."""
    val = dct.pop(old)
    dct[new] = val
def snake_case ( snake_case : Optional[int] , snake_case : int ) -> Dict:
"""simple docstring"""
lowerCAmelCase = ViTMSNConfig()
lowerCAmelCase = 1000
lowerCAmelCase = '''datasets/huggingface/label-files'''
lowerCAmelCase = '''imagenet-1k-id2label.json'''
lowerCAmelCase = json.load(open(hf_hub_download(snake_case , snake_case ) , 'r' ) )
lowerCAmelCase = {int(snake_case ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase = 384
lowerCAmelCase = 1536
lowerCAmelCase = 6
elif "l16" in checkpoint_url:
lowerCAmelCase = 1024
lowerCAmelCase = 4096
lowerCAmelCase = 24
lowerCAmelCase = 16
lowerCAmelCase = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase = 4
elif "l7" in checkpoint_url:
lowerCAmelCase = 7
lowerCAmelCase = 1024
lowerCAmelCase = 4096
lowerCAmelCase = 24
lowerCAmelCase = 16
lowerCAmelCase = 0.1
lowerCAmelCase = ViTMSNModel(snake_case )
lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case , map_location='cpu' )['''target_encoder''']
lowerCAmelCase = ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case )
lowerCAmelCase = create_rename_keys(snake_case , base_model=snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
read_in_q_k_v(snake_case , snake_case , base_model=snake_case )
model.load_state_dict(snake_case )
model.eval()
lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
lowerCAmelCase = ViTImageProcessor(
size=config.image_size , image_mean=snake_case , image_std=snake_case )
lowerCAmelCase = image_processor(images=snake_case , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase = model(**snake_case )
lowerCAmelCase = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
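    # Editor's note: the conversion above is mostly state-dict surgery; a hedged
    # micro-example of the rename_key() move on a toy dict:
    # sd = {"module.norm.weight": torch.ones(2)}
    # sd["layernorm.weight"] = sd.pop("module.norm.weight")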
| 284 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Finds the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 0 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Estimates the dominant eigenpair of a symmetric or Hermitian matrix."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
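# Editor's sketch (assuming the reconstructed signature above): on a diagonal
# matrix the dominant eigenvalue should come back as roughly 2.
_val, _vec = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
assert abs(_val - 2.0) < 1e-6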
| 278 |
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encodes a word into the A/B cipher defined by encode_dict."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decodes a message produced by encode() back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
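# Editor's sketch: encode/decode should round-trip any lowercase message.
assert decode(encode("hello world")) == "hello world"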
| 0 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
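# Editor's sketch: both variants agree on a classic case, gcd(48, 18) == 6.
assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6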
| 230 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1_024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
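    # Example invocation (paths are illustrative, not part of the script):
    #   python convert_m2m100_checkpoint.py /path/to/m2m100/model.pt ./m2m100-hf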
| 0 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
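# Minimal usage sketch (hedged — any `UNet2DModel` + `ScoreSdeVeScheduler` pair
# saved with `save_pretrained` works; the checkpoint id below is illustrative):
# sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = sde_ve(num_inference_steps=2000).images[0]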
| 589 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_canine"""] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
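    # Replacing the entry in `sys.modules` with a `_LazyModule` defers the heavy
    # torch imports above until an attribute such as `CanineModel` is first accessed.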
| 0 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
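# Minimal usage sketch (hedged — `page` is assumed to be a PIL.Image of a document;
# the checkpoint id is the public base model):
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(page, return_tensors="pt")  # OCR words/boxes come from the image processor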
| 315 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
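# Minimal usage sketch (hedged — `img` is assumed to be a PIL.Image; the
# checkpoint id is the public OFA-Sys model):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# batch = processor(text=["一只猫"], images=img, return_tensors="pt", padding=True)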
| 0 | 0 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of an arithmetic series.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
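# Worked example: n=10, a=1, d=1 gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0.
assert sum_of_series(1, 1, 10) == 55.0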
if __name__ == "__main__":
import doctest
doctest.testmod()
| 209 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
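# For intuition, a pure-Python sketch of the binary-class formula that
# `sklearn.metrics.matthews_corrcoef` generalizes (the helper name is ours,
# not part of the metric): MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
def _binary_mcc(references, predictions):
    tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)
    tn = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 0)
    fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)
    fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)
    denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    return 0.0 if denom == 0 else (tp * tn - fp * fn) / denom


# One TP, TN, FP and FN each -> numerator 1*1 - 1*1 = 0, i.e. no correlation.
assert _binary_mcc([1, 0, 1, 0], [1, 0, 0, 1]) == 0.0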
| 0 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Optional[Any] ):
if self.optimizer is None:
A = ['''bias''', '''LayerNorm.weight''']
A = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
A = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
A = Adafactor
A = {'''scale_parameter''': False, '''relative_step''': False}
else:
A = AdamW
A = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
A = self.args.learning_rate
if self.sharded_ddp:
A = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
A = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
A = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : int ):
A = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
A = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
A = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
A = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def UpperCamelCase ( self : str ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
A = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
A = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
A = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
A = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
A = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
A = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ):
A = inputs.pop('labels' )
A = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] = None , ):
A = self._prepare_inputs(__lowerCAmelCase )
A = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
A = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
A = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs['max_length'] )
A = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
A = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
A = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
A = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs['max_length'] )
return (loss, logits, labels)
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] ):
A = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
f''' padded to `max_length`={max_length}''' )
A = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
A = tensor
return padded_tensor
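# Hedged sketch of the `label_smoothed_nll_loss` helper that `__init__` imports
# from `utils` above (fairseq-style recipe; the name suffix and exact reduction
# are our assumptions, not the library's API):
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # Mix the NLL of the gold token with the mean NLL over the whole vocabulary.
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    pad_mask = target.eq(ignore_index)
    safe_target = target.clamp(min=0)  # keep gather() away from a negative ignore_index
    nll_loss = -lprobs.gather(dim=-1, index=safe_target).masked_fill(pad_mask, 0.0)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True).masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss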
| 699 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
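# Quick sanity check: edges 0 -(1)-> 1 -(2)-> 2 plus a direct 0 -(10)-> 2; the
# relaxations should prefer the two-hop path of total weight 3.
_example_graph = [
    {"src": 0, "dst": 1, "weight": 1},
    {"src": 1, "dst": 2, "weight": 2},
    {"src": 0, "dst": 2, "weight": 10},
]
assert bellman_ford(_example_graph, 3, 3, 0) == [0.0, 1.0, 3.0]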
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("""Enter number of vertices: """).strip())
    E = int(input("""Enter number of edges: """).strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("""Edge """, i + 1)
        src, dest, weight = (
            int(x)
            for x in input("""Enter source, destination, weight: """).strip().split(""" """)
        )
        graph[i] = {"""src""": src, """dst""": dest, """weight""": weight}

    source = int(input("""\nEnter shortest path source:""").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
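# Worked example of X_L = 2*pi*f*L: a 35 mH inductor driven at 1 kHz gives
# 2 * pi * 1000 * 0.035 ≈ 219.91 ohms of inductive reactance.
assert round(ind_reactance(0.035, 1000, 0)["reactance"], 2) == 219.91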
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase )
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ :List[str] = [input_ids, input_mask]
__magic_name__ :Any = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = True
__magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__magic_name__ :Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self.num_labels
__magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.num_choices
__magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ :Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.num_labels
__magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__magic_name__ :List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
"""simple docstring"""
__magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__magic_name__ :int = 5_0_0_0_0
__magic_name__ :Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.constant([[4, 1_0]] )
__magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer(self):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
"""simple docstring"""
# 2,12,16,64
__magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
__magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
__magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
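# Generic rotary-position-embedding sketch for intuition (NumPy; illustrative
# only, not the exact TFRoFormer kernel exercised above): each feature pair
# (x[2i], x[2i+1]) at position p is rotated by the angle p * theta_i.
import numpy as np

def rope_sketch(x, base=10000.0):
    seq_len, dim = x.shape
    half = dim // 2
    theta = base ** (-np.arange(half) / half)  # per-pair frequencies
    ang = np.outer(np.arange(seq_len), theta)  # (seq_len, half) rotation angles
    cos, sin = np.cos(ang), np.sin(ang)
    x1, x2 = x[:, 0::2], x[:, 1::2]            # even/odd feature halves
    out = np.empty_like(x)
    out[:, 0::2] = x1 * cos - x2 * sin
    out[:, 1::2] = x1 * sin + x2 * cos
    return out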
| 0 | 0 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""")
    if input_num <= 0:
        raise ValueError("""Input must be positive""")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
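# Worked example: 28 is a perfect number — its proper divisors 1 + 2 + 4 + 7 + 14
# sum back to 28 itself.
assert sum_of_divisors(28) == 28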
if __name__ == "__main__":
import doctest
doctest.testmod()
| 398 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_herbert_fast"""] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
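# Minimal usage sketch (hedged — `ds_info` is assumed to be a `DatasetInfo`
# returned by `huggingface_hub.HfApi().dataset_info(...)`):
# fs = HfFileSystem(repo_info=ds_info, token=token)
# print(fs.ls(""))                       # top-level entries of the repo
# with fs.open("data/train.csv") as f:   # hypothetical file name
#     header = f.read(100)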
| 197 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
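    # Example (added): consuming a converted checkpoint through the public X-CLIP
    # classes afterwards. A minimal sketch; the Hub id "microsoft/xclip-base-patch32"
    # is assumed to be one of the checkpoints produced by this script.
    #
    #     import numpy as np
    #     from transformers import XCLIPModel, XCLIPProcessor
    #
    #     model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
    #     processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
    #     frames = list(np.random.randint(0, 255, (8, 224, 224, 3), dtype=np.uint8))
    #     inputs = processor(text=["playing sports"], videos=frames, return_tensors="pt", padding=True)
    #     probs = model(**inputs).logits_per_video.softmax(dim=1)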
| 0 | 0 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 681 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around token sequences: each sample is (token_ids, length)."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
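# Example (added): wiring the dataset into a DataLoader with its own padding
# collator. A minimal sketch; the SimpleNamespace stands in for the
# distillation scripts' real argparse namespace and the token ids are made up.
if __name__ == "__main__":
    from types import SimpleNamespace

    from torch.utils.data import DataLoader

    demo_params = SimpleNamespace(
        max_model_input_size=512,
        mlm=True,
        is_master=True,
        special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
    )
    demo_data = [
        np.array([101] + [7] * 12 + [102]),  # 14 tokens, long enough to survive the >11 filter
        np.array([101] + [8] * 12 + [102]),
    ]
    demo_dataset = LmSeqsDataset(demo_params, demo_data)
    demo_loader = DataLoader(demo_dataset, batch_size=2, collate_fn=demo_dataset.batch_sequences)
    token_ids, lengths = next(iter(demo_loader))
    print(token_ids.shape, lengths)  # torch.Size([2, 14]) tensor([14, 14])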
| 0 | 0 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        """Computes log probabilities for all tokens."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i

            return out
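# Example (added): exercising the adaptive head on random inputs. A minimal
# sketch with made-up sizes (vocab 1000, cutoffs [200, 500], div_val 1 so the
# optional projections stay disabled).
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[200, 500], div_val=1)
    hidden = torch.randn(4, 12, 64)  # (bs, seq_len, d_proj)
    labels = torch.randint(0, 1000, (4, 12))
    nll = crit(hidden, labels)  # negative log-likelihood per shifted position
    print(nll.shape)  # torch.Size([44]) == 4 * (12 - 1)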
| 284 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
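# Example (added): round-tripping a sentence through the pretrained checkpoint.
# A minimal sketch; it downloads the sentencepiece model from the Hub on first use.
if __name__ == "__main__":
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer("Crime and Punishment").input_ids
    print(tokenizer.convert_ids_to_tokens(ids))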
| 0 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
snake_case__ : int = ["""text""", """image""", """audio"""]
def __lowerCamelCase ( A__ : Dict ) -> List[str]:
lowerCamelCase_ : Dict = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(A__ , A__ ):
inputs.append(create_inputs(A__ ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __lowerCamelCase ( A__ : int ) -> List[str]:
lowerCamelCase_ : int = []
for output in outputs:
if isinstance(A__ , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(A__ , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(A__ , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def _lowerCAmelCase ( self : str ) ->Tuple:
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
lowerCamelCase_ : Union[str, Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , __lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCamelCase_ : Optional[Any] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _lowerCAmelCase ( self : List[Any] ) ->List[Any]:
lowerCamelCase_ : List[Any] = create_inputs(self.tool.inputs )
lowerCamelCase_ : Union[str, Any] = self.tool(*__lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCamelCase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(__lowerCAmelCase ) , self.tool.outputs )
def _lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def _lowerCAmelCase ( self : str ) ->List[str]:
lowerCamelCase_ : str = create_inputs(self.tool.inputs )
lowerCamelCase_ : Optional[Any] = self.tool(*__lowerCAmelCase )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase_ : str = [outputs]
self.assertEqual(len(__lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(__lowerCAmelCase , self.tool.outputs ):
lowerCamelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
def _lowerCAmelCase ( self : str ) ->List[Any]:
lowerCamelCase_ : List[str] = create_inputs(self.tool.inputs )
lowerCamelCase_ : Tuple = []
for _input, input_type in zip(__lowerCAmelCase , self.tool.inputs ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCamelCase_ : Optional[int] = self.tool(*__lowerCAmelCase )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase_ : Any = [outputs]
self.assertEqual(len(__lowerCAmelCase ) , len(self.tool.outputs ) )
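# Example (added): how a concrete test case would plug a tool into the mixin.
# A minimal sketch; "text-classification" is a hypothetical task name and the
# concrete tool comes from transformers' tool registry in real tests.
#
#     import unittest
#     from transformers import load_tool
#
#     class TextClassificationToolTester(ToolTesterMixin, unittest.TestCase):
#         def setUp(self):
#             self.tool = load_tool("text-classification")
#             self.tool.setup()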
| 278 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
    def test_rust_and_python_full_tokenizers(self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
    def test_chinese(self):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower(self):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false(self):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true(self):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default(self):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower(self):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens(self):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer(self):
"""simple docstring"""
__magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__magic_name__ :Union[str, Any] = {}
for i, token in enumerate(__lowerCAmelCase ):
__magic_name__ :Tuple = i
__magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
    def test_sequence_builders(self):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__magic_name__ :Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__magic_name__ :List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
    def test_offsets_with_special_characters(self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__magic_name__ :Optional[Any] = tokenizer_r.encode_plus(
__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , )
__magic_name__ :Any = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , '''do_lower_case''' ) else False
__magic_name__ :Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
"""simple docstring"""
__magic_name__ :Dict = ['''的''', '''人''', '''有''']
__magic_name__ :Any = ''''''.join(__lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ :Optional[Any] = True
__magic_name__ :Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = False
__magic_name__ :Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :List[str] = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase )
__magic_name__ :Optional[int] = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__magic_name__ :Dict = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase )
]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
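# Example (added): the greedy longest-match-first lookup that the wordpiece
# test above exercises, run directly on the same toy vocabulary.
if __name__ == "__main__":
    vocab = {token: i for i, token in enumerate(["[UNK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"])}
    wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    print(wordpiece.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']
    print(wordpiece.tokenize("unwantedX running"))  # ['[UNK]', 'runn', '##ing']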
| 0 | 0 |
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
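    # Example (added): sanity-checking the dumped checkpoint afterwards. A
    # minimal sketch; the path mirrors the --dump_checkpoint default above.
    #
    #     compressed = torch.load("serialization_dir/tf_roberta_048131723.pth", map_location="cpu")
    #     print(len(compressed), "tensors kept")  # teacher layers 0/2/4/7/9/11 mapped onto student 0..5
    #     print(sorted(compressed)[:3])           # first few parameter names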
| 230 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
"""simple docstring"""
__magic_name__ :Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ :Optional[Any] = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Any = self.compute_metrics
__magic_name__ :List[Any] = None
__magic_name__ :List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :Optional[Any] = eval_loop(
__lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ :Union[str, Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions )
__magic_name__ :int = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :Dict = metrics.pop(__lowerCAmelCase )
self.log(__lowerCAmelCase )
else:
__magic_name__ :List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ :Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase )
return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
"""simple docstring"""
__magic_name__ :int = self.get_test_dataloader(__lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ :Dict = self.compute_metrics
__magic_name__ :str = None
__magic_name__ :Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ :int = eval_loop(
__lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , )
finally:
__magic_name__ :List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ :Optional[Any] = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , '''predict''' )
__magic_name__ :Dict = self.compute_metrics(__lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ :List[str] = metrics.pop(__lowerCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
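# Example (added): the intended calibrate-then-export flow for this trainer.
# A minimal sketch; the `trainer` instance and its datasets are assumed to be
# built by the surrounding run_quant_qa.py script.
#
#     trainer.calibrate()                     # collect activation ranges on calibration data
#     metrics = trainer.evaluate()            # post-calibration accuracy check
#     trainer.save_onnx(output_dir="./onnx")  # export the fake-quantized model graph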
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list, fnc: Callable[[T, T], T]) -> None:
        """Segment Tree constructor; works with any commutative combiner `fnc`."""
        any_type: Any = None
        self.N = len(arr)
        self.st = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update an element in O(log N) time."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T:  # noqa: E741
        """Get range query value in O(log N) time."""
        l, r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments():
        """Test all possible segments."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
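    # Example (added): the flat array layout the class relies on, shown for a
    # power-of-two input. Leaves of [1, 2, 3, 4] sit in st[4:8]; st[1] is the root.
    demo = SegmentTree([1, 2, 3, 4], min)
    assert demo.st == [None, 1, 1, 3, 1, 2, 3, 4]
    assert demo.query(1, 3) == 2  # min over [2, 3, 4]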
| 589 |
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check data validity, following RFC3548
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
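    # Example (added): a quick round-trip through the two helpers above.
    assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"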
| 0 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(UpperCAmelCase ):
http_head('''https://huggingface.co''' )
| 315 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
http_head('''https://huggingface.co''' )
| 0 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase_ = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
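# Added note (not part of the original test file): `is_copy_consistent` scans for
# comments of the form
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
# and verifies that the class body below them matches the referenced source once
# the `DDPM->Test` rename is applied; the cases above exercise exact copies,
# renames, and the overwrite path.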
| 209 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if number is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
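
# Added sanity checks (not part of the original solution): spot-check the
# helpers on small inputs; the primes below 10 are 2, 3, 5 and 7.
assert is_prime(97) and not is_prime(1)
assert next(prime_generator()) == 2
assert solution(10) == 17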
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
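
# Added usage sketch (not part of the original module): the attribute_map lets
# framework code read GPT-J sizes under the common config names.
if __name__ == "__main__":
    demo_config = GPTJConfig(n_layer=2, n_head=4, n_embd=128)
    print(demo_config.num_hidden_layers, demo_config.num_attention_heads, demo_config.hidden_size)  # 2 4 128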
| 699 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
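
# Added usage sketch (not part of the original tests): the generic helpers
# dispatch on the input type, so one call site handles every framework, e.g.:
#     transpose(np.zeros((3, 4))).shape == (4, 3)
#     transpose(torch.zeros(3, 4)).shape == torch.Size([4, 3])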
| 0 | 0 |
from __future__ import annotations
def is_palindrome(n) -> bool:
    """Return True if the decimal (or given string) representation of n reads
    the same forwards and backwards."""
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum all numbers below limit that are palindromic in both base 10 and
    base 2 (Project Euler problem 36)."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
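
# Added spot check (not part of the original solution): 585 is a palindrome in
# both bases (585 = 0b1001001001).
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])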
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 592 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository (legacy protocol)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
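
# Added usage sketch (not part of the original module), assuming `info` is a
# DatasetInfo fetched with huggingface_hub.HfApi().dataset_info(repo_id):
#     fs = HfFileSystem(repo_info=info)
#     fs.ls("")                  # top-level files and directories of the repo
#     fs.open("README.md").read()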
| 0 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
A_: List[str] = """▁"""
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
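
# Added usage sketch (not part of the original module):
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     tokenizer.tokenize("Hello world!")  # SentencePiece pieces, e.g. ['▁hello', '▁world', '!']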
| 398 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
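
# Added usage sketch (not part of the original tests): a minimal write/read
# round trip with the classes under test.
#     ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [0.5]})
#     ParquetDatasetWriter(ds, "tmp.parquet").write()
#     reloaded = ParquetDatasetReader("tmp.parquet").read()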
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 197 |
def multiplicative_persistence(num: int) -> int:
    """Return the number of steps needed to reach a single digit by repeatedly
    multiplying the digits of num.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of steps needed to reach a single digit by repeatedly
    summing the digits of num.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
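
# Added worked example (not part of the original module): 39 -> 27 -> 14 -> 4
# under digit multiplication, and 39 -> 12 -> 3 under digit addition.
assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2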
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 681 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Optional[Any] = """sshleifer/student_marian_en_ro_6_1"""
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=True,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
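
# Added usage note (not part of the original tests): a single quick case can be
# run with pytest's -k filter; the file path is assumed from the transformers
# repo layout:
#     python -m pytest tests/extended/test_trainer_ext.py -k test_run_seq2seq_no_dist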
| 0 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
'''simple docstring'''
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_multiple_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = weight_tying
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
return config, input_ids, input_mask, token_labels
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = GPTNeoXJapaneseModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
lowerCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = True
lowerCAmelCase = GPTNeoXJapaneseModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = GPTNeoXJapaneseForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            # greedy generation, truncated at 50 tokens
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 284 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n that have the greatest product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
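    # Sanity check; the expected value is the well-known published answer to
    # Project Euler #8 for a thirteen-digit window (assumption, not from this file).
    assert solution() == 23514624000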
| 0 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
'''simple docstring'''
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
    def __repr__(self) -> str:
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList(Generic[T, U]):
'''simple docstring'''
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
'''simple docstring'''
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
    def __repr__(self) -> str:
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
    def __contains__(self, key: T) -> bool:
        return key in self.cache
    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
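
# Minimal usage sketch of the cache above; `fib` and the capacity of 32 are
# illustrative choices, not part of the class itself.
@LRUCache.decorator(32)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(20))           # 6765, computed with memoized recursive calls
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=32, current size=...)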
| 278 |
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
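    # Round-trip sanity check for the cipher defined above.
    assert decode(encode("hello world")) == "hello world"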
| 0 | 0 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__(self ):
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple
def __hash__(self ):
return hash(_version_tuple_to_str(self.tuple ) )
    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
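
# Usage sketch for the helpers above: __eq__ coerces strings through
# _validate_operand, and ordering compares the (major, minor, patch) tuples.
assert Version("1.2.3").tuple == (1, 2, 3)
assert Version("1.2.3") == "1.2.3"
assert Version("1.2.3") < Version("1.10.0")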
| 230 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
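    # Illustrative invocation (script and file names are hypothetical):
    #   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100-pytorch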
| 0 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            pipeline, params = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float64).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float64).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=__lowerCAmelCase )
_lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = 5_0
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
_lowerCAmelCase = replicate(__lowerCAmelCase )
_lowerCAmelCase = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = shard(__lowerCAmelCase )
_lowerCAmelCase = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase )
_lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = 5_0
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
_lowerCAmelCase = replicate(__lowerCAmelCase )
_lowerCAmelCase = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = shard(__lowerCAmelCase )
_lowerCAmelCase = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = 5_0
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
_lowerCAmelCase = replicate(__lowerCAmelCase )
_lowerCAmelCase = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = shard(__lowerCAmelCase )
_lowerCAmelCase = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
_lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
_lowerCAmelCase = scheduler.create_state()
_lowerCAmelCase = scheduler_state
_lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = 5_0
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = pipeline.prepare_inputs(__lowerCAmelCase )
# shard inputs and rng
_lowerCAmelCase = replicate(__lowerCAmelCase )
_lowerCAmelCase = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = shard(__lowerCAmelCase )
_lowerCAmelCase = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , __lowerCAmelCase )
_lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , )
_lowerCAmelCase = replicate(__lowerCAmelCase )
_lowerCAmelCase = pipeline.prepare_inputs(__lowerCAmelCase )
_lowerCAmelCase = shard(__lowerCAmelCase )
_lowerCAmelCase = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
_lowerCAmelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
_lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , use_memory_efficient_attention=__lowerCAmelCase , )
_lowerCAmelCase = replicate(__lowerCAmelCase )
_lowerCAmelCase = pipeline.prepare_inputs(__lowerCAmelCase )
_lowerCAmelCase = shard(__lowerCAmelCase )
_lowerCAmelCase = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
_lowerCAmelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
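        # The data flow shared by the tests above, in brief: `replicate` copies
        # the params pytree to every local device, `jax.random.split` hands each
        # device its own RNG key, and `shard` reshapes host arrays to
        # (num_devices, batch_per_device, ...) so the pmapped pipeline call
        # (`jit=True`) runs one slice per device.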
| 589 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
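# Net effect of the pattern above: importing this module stays cheap, and the
# torch-backed symbols are only materialized by _LazyModule on first attribute
# access.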
| 0 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
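    # Both implementations above agree on a simple case:
    assert manhattan_distance([1, 1], [9, 9]) == 16.0
    assert manhattan_distance_one_liner([1, 1], [9, 9]) == 16.0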
| 315 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
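
# Hedged usage sketch for the processor above; the checkpoint id is the
# published Chinese-CLIP model, and `image` is assumed to be a PIL image.
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")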
| 0 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
        hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4], num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 209 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
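    # Hand-worked instance of the formula behind this metric:
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
    # With TP = TN = 2 and FP = FN = 1: (4 - 1) / sqrt(3*3*3*3) = 3/9 ≈ 0.33.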
| 0 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg_error)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 699 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
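
    # Non-interactive sketch: for edges 0->1 (w=4), 0->2 (w=1), 2->1 (w=2),
    # bellman_ford(graph, 3, 3, 0) returns [0.0, 3.0, 1.0] because the path
    # 0 -> 2 -> 1 (cost 3) beats the direct edge 0 -> 1 (cost 4).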
| 0 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
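
# Shape sketch for the transposes above (hypothetical sizes): TF stores regular
# conv kernels as (H, W, in, out) and depthwise kernels as (H, W, channels,
# multiplier), while PyTorch wants (out, in, H, W); for example,
#   np.transpose(np.zeros((3, 3, 16, 32)), (3, 2, 0, 1)).shape == (32, 16, 3, 3)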
def apply_tf_padding(features, conv_layer):
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
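
# Worked example of the rule above: a 224x224 input with a 3x3 kernel and
# stride 2 gives pad_along = max(3 - 2, 0) = 1 on each axis, i.e. padding
# (left, right, top, bottom) = (0, 1, 0, 1), matching TensorFlow "SAME"
# semantics.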
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
snake_case_ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Each block is a depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
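# Added usage sketch (not in the original file): run the backbone on a random
# image tensor. `MobileNetVaConfig` is assumed to be imported at the top of
# this module; with the default depth multiplier of 1.0 the final feature map
# has 1024 channels.
def _demo_mobilenet_backbone():
    config = MobileNetVaConfig()
    model = MobileNetVaModel(config)
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    outputs = model(pixel_values, return_dict=True)
    print(outputs.last_hidden_state.shape)  # e.g. torch.Size([1, 1024, 7, 7])
    print(outputs.pooler_output.shape)      # torch.Size([1, 1024])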
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
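# Added usage sketch (not in the original file): classification head with a
# loss. Long-dtype labels route the head into single-label cross-entropy.
def _demo_mobilenet_classifier():
    config = MobileNetVaConfig(num_labels=10)
    model = MobileNetVaForImageClassification(config)
    pixel_values = torch.randn(2, config.num_channels, 224, 224)
    labels = torch.tensor([1, 7])
    outputs = model(pixel_values, labels=labels, return_dict=True)
    print(outputs.loss)          # scalar cross-entropy loss
    print(outputs.logits.shape)  # torch.Size([2, 10])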
| 592 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)

        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])  # the weights are only materialised once the layer is built
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
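# Added worked example (not in the original test file): rotary position
# embeddings rotate each feature pair (x1, x2) by a position-dependent angle
# theta = pos / 10000**(2*i/d), giving (x1*cos - x2*sin, x2*cos + x1*sin).
# A tiny NumPy sketch of that identity, independent of the TF layer above:
def _demo_rotary_rotation():
    import numpy as np

    pos, i, d = 2.0, 0, 4  # position 2, first feature pair, embedding dim 4
    theta = pos / (10000 ** (2 * i / d))
    x1, x2 = 0.3, -0.5
    return (x1 * np.cos(theta) - x2 * np.sin(theta), x2 * np.cos(theta) + x1 * np.sin(theta))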
| 0 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
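# Added usage sketch (not in the original file): remapping custom column names
# onto the canonical SQuAD-style schema via `column_mapping`.
def _demo_question_answering_template():
    task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
    assert task.column_mapping == {"query": "question", "passage": "context", "answers": "answers"}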
| 398 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
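# Added usage sketch (not in the original file): once `sys.modules[__name__]`
# is swapped for the `_LazyModule`, attribute access triggers the real import.
# The checkpoint name below is an assumption for illustration only.
#
#   from transformers.models.herbert import HerbertTokenizer  # resolved lazily
#   tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")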
| 0 | 0 |
import math


def insertion_sort(array, start=0, end=0):
    """Sort array[start:end] in place with insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three indexed values, used as the pivot."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Introspective sort: quicksort with a heap-sort fallback and an
    insertion-sort finish for small slices."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
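# Added quick check (not in the original file): `sort` only falls back to
# `heap_sort` once the 2*ceil(log2(n)) depth budget is spent, and slices at or
# below the size threshold always finish in `insertion_sort`.
def _demo_intro_sort():
    import random

    data = [random.randint(-1000, 1000) for _ in range(500)]
    expected = sorted(data)
    assert sort(data) == expected
    assert sort([]) == []
    assert sort([5.0]) == [5.0]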
| 197 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
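# Added usage sketch (not in the original script): the patch size is parsed
# straight out of the checkpoint name.
def _demo_get_xclip_config():
    config = get_xclip_config("xclip-base-patch32", num_frames=8)
    assert config.vision_config.patch_size == 32
    assert config.vision_config.num_frames == 8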
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)

    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_lowerCamelCase ="""\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_lowerCamelCase ="""\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_lowerCamelCase =r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing both sides."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
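# Added usage sketch (not in the original file), mirroring the docstring
# example above; `datasets.load_metric` resolves this module by name under the
# pre-3.0 `datasets` API and needs network access on first use.
def _demo_competition_math():
    metric = datasets.load_metric("competition_math")
    results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    print(results)  # {'accuracy': 1.0}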
| 681 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
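# Added usage sketch (not in the original file): `params` is normally the
# distillation script's argparse namespace; the SimpleNamespace below is a
# minimal stand-in with hypothetical special-token ids.
def _demo_lm_seqs_dataset():
    from types import SimpleNamespace

    from torch.utils.data import DataLoader

    params = SimpleNamespace(
        max_model_input_size=512,
        mlm=True,
        special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
        is_master=True,
    )
    data = [np.array([101, *range(5, 17), 102]) for _ in range(4)]  # length-14 sequences
    dataset = LmSeqsDataset(params, data)
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
    token_ids, lengths = next(iter(loader))
    print(token_ids.shape, lengths)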
| 0 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
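# Added usage sketch (not in the original file): a deliberately tiny,
# randomly initialised LUKE model built from this config. `LukeModel` is
# assumed to come from the surrounding library.
def _demo_luke_config():
    from transformers import LukeModel

    config = LukeConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
    model = LukeModel(config)
    print(model.config.entity_vocab_size)  # 500000 (the default above)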
| 284 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
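# Added usage sketch (not in the original file): round-tripping text through
# the SentencePiece vocabulary of the public checkpoint named in
# PRETRAINED_VOCAB_FILES_MAP above (downloads the vocab on first use).
def _demo_reformer_tokenizer():
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer("He was in a hurry.")["input_ids"]
    print(tokenizer.decode(ids))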
| 0 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 278 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
__magic_name__ :int = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        """simple docstring"""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        """simple docstring"""
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 0 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    '''simple docstring'''
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
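# Invocation sketch (script file name and all paths below are placeholders,
# not values taken from this file; the flags mirror the parser declared above):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso_checkpoint.ckpt \
#       --config_file /path/to/yoso_config.json \
#       --pytorch_dump_path /path/to/output_dir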
| 230 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        """simple docstring"""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        """simple docstring"""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)
        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)
        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            self.log(metrics)
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def A ( self , __lowerCAmelCase="./" ):
"""simple docstring"""
__magic_name__ :List[Any] = self.eval_dataset
__magic_name__ :Any = self.get_eval_dataloader(__lowerCAmelCase )
__magic_name__ :int = next(iter(__lowerCAmelCase ) )
# saving device - to make it consistent
__magic_name__ :str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__magic_name__ :int = tuple(v.to(__lowerCAmelCase ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ :Any = True
__magic_name__ :Optional[int] = self.model.to(__lowerCAmelCase )
model.eval()
model.float()
__magic_name__ :Any = model.module if hasattr(__lowerCAmelCase , '''module''' ) else model
quant_trainer.configure_model(__lowerCAmelCase , self.quant_trainer_args )
__magic_name__ :int = os.path.join(__lowerCAmelCase , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__magic_name__ :Dict = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , export_params=__lowerCAmelCase , opset_version=1_3 , do_constant_folding=__lowerCAmelCase , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__lowerCAmelCase , )
logger.info('''onnx export finished''' )
| 0 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
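# Usage sketch: constructing a config with one override and reading fields back.
# The values shown are the declared defaults above, not checkpoint-specific numbers.
#   config = TimesformerConfig(num_frames=16)
#   config.attention_type  # "divided_space_time"
#   config.hidden_size     # 768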
| 589 |
def base16_encode(data: bytes) -> str:
    """simple docstring"""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """simple docstring"""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
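    # Round-trip sanity check (sketch): base16_encode and base16_decode are
    # inverses for bytes input; "Hello World!" is 48656C6C6F20576F726C6421 in hex.
    assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"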
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
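# Usage sketch: with the lazy module installed in sys.modules, importing the
# package stays cheap; heavy submodules load only on first attribute access.
#   from transformers.models.poolformer import PoolFormerConfig  # triggers the lazy import
#   config = PoolFormerConfig()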
| 315 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """simple docstring"""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
    """simple docstring"""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
http_head('''https://huggingface.co''' )
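# Usage sketch: the `offline` context manager simulates the requested failure
# mode for any network call issued inside the block, which is what the three
# tests above rely on.
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       ...  # any requests call in here raises ConnectionError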
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    '''simple docstring'''
    # `row` is the row we are about to fill; it equals the number of queens placed so far.
    row = len(possible_board)
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
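    # Sanity check (sketch): collecting boards directly shows that the
    # 4-queens puzzle has exactly two solutions (mirror images of each other).
    solutions: list[list[str]] = []
    depth_first_search([], [], [], solutions, 4)
    assert len(solutions) == 2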
| 209 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f"{solution() = }")
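    # Worked check (sketch): the primes below 10 are 2, 3, 5 and 7, so the
    # partial sum solution(10) must equal 17.
    assert solution(10) == 17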
| 0 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    '''simple docstring'''

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
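# Usage sketch (model name is an example; any encoder checkpoint works):
#   from transformers import pipeline
#   extractor = pipeline(task="feature-extraction", model="bert-base-uncased")
#   features = extractor("This is a test.")  # nested list: [batch, tokens, hidden_size]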
| 699 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
    def test_flatten_dict(self):
        """simple docstring"""
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
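    # Illustration (sketch): the helpers under test dispatch on the tensor type,
    # so one call site covers NumPy, PyTorch, TensorFlow and JAX inputs alike:
    #   transpose(np.zeros((2, 3))).shape        -> (3, 2)
    #   reshape(np.zeros((2, 3)), (3, 2)).shape  -> (3, 2)
    #   squeeze(np.zeros((1, 5))).shape          -> (5,)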
| 0 | 0 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
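    # Non-interactive check (sketch), runs after the prompt-driven demo above:
    sample = [5, 2, 9, 1, 5, 6]
    quick_sort_random(sample, 0, len(sample))
    assert sample == sorted([5, 2, 9, 1, 5, 6])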
| 592 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        """simple docstring"""
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        """simple docstring"""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        """simple docstring"""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        """simple docstring"""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """simple docstring"""
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
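# Usage sketch (`info` would come from e.g. huggingface_hub's HfApi.dataset_info;
# the variable names here are illustrative only):
#   fs = HfFileSystem(repo_info=info, token=None)
#   fs.ls("")            # top-level files and directories of the repo
#   fs.info("data.csv")  # metadata dict from the dir cache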
| 0 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( _A ,_A ,_A ,_A="attention" ):
"""simple docstring"""
_lowercase = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
_lowercase = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
_lowercase = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
_lowercase = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """simple docstring"""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """simple docstring"""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """simple docstring"""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
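# Invocation sketch (script file name and paths are placeholders; the flags
# mirror the parser declared above):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file /path/to/t5_config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --is_encoder_only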
| 398 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """simple docstring"""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """simple docstring"""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """simple docstring"""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''', [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
], )
def test_get_writer_batch_size(feature, expected):
    """simple docstring"""
    assert get_writer_batch_size(feature) == expected
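# Usage sketch: the reader/writer pair round-trips an in-memory Dataset, which
# is the behaviour the tests above pin down.
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = Dataset.from_parquet("out.parquet")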
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_mobilevit"""] = ["""MobileViTFeatureExtractor"""]
    _import_structure["""image_processing_mobilevit"""] = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilevit"""] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilevit"""] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
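

# For reference, a minimal sketch of the lazy-import pattern used above. This
# only mimics (and is an assumption about) what transformers' _LazyModule does:
# attribute access triggers the import of the owning submodule on demand.
import importlib


class _MiniLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the submodule that exports `attr` only when it is first needed.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self._name!r} has no attribute {attr!r}")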
| 197 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
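    # Worked examples (added): 39 -> 27 -> 14 -> 4 takes three multiplicative
    # steps, while 9876 -> 30 -> 3 takes two additive steps.
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(9876) == 2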
| 0 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **calc_rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
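# Example invocation (hypothetical file names; `calculate_rouge_path` is the
# function registered with fire.Fire above):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json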
| 681 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = """sshleifer/student_marian_en_ro_6_1"""
MBART_TINY = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, '''trainer_state.json''')).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
        last_step_stats = eval_metrics[-1]
        assert isinstance(last_step_stats['''eval_bleu'''], float)
        assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=False)
@require_torch_multi_gpu
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True)
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True, extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True, extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True, extra_args_str='''--sharded_ddp zero_dp_2''', predict_with_generate=False)
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A ( self ):
"""simple docstring"""
        self.run_seqaseq_quick(
            distributed=True, extra_args_str='''--sharded_ddp zero_dp_2 --fp16''', predict_with_generate=False)
@require_apex
@require_torch_gpu
def A ( self ):
"""simple docstring"""
        # XXX: apex breaks the trainer if it is run twice, e.g. run_seq2seq.main() called twice from
        # the same program, and it also breaks other tests that run in the same pytest worker. Until
        # this is sorted out, it must run only in an external program, i.e. distributed=True in this
        # test and only under one or more gpus - a cpu variant would need a dedicated test.
        #
        # Specifically, the problem was traced to self.optimizer.step() - if it runs a second time
        # via a second main() call, it botches the subsequent eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
    def A ( self , experiment_id ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
        data = experiments[experiment_id]
        kwargs = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        log_info_string = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data['''extra_args_str'''])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data['''n_matches'''])
@slow
def A ( self ):
"""simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3E-4,
            num_train_epochs=10,
            distributed=False,
        )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, '''trainer_state.json''')).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim) -> Tuple[int, float]:
            extra_args = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3E-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, '''trainer_state.json''')).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
            loss = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, check
        # that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''')
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''')
        self.assertEqual(
            loss_orig, loss_bnb, F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''')
    def run_trainer(
        self,
        max_len,
        model_name,
        num_train_epochs,
        learning_rate=3E-3,
        optim="adafactor",
        distributed=False,
        extra_args_str=None,
        eval_steps=0,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
        n_gpus_to_use=None,
    ):
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
--do_train
--num_train_epochs {str(__lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
        args_eval = F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
            --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__lowerCAmelCase )}
'''.split()
        args_predict = '''
--do_predict
'''.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys, '''argv''', testargs):
                main()
return output_dir
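# For reference, the distributed branch above assembles a command line
# equivalent to the following (illustrative GPU count and port):
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       examples/pytorch/translation/run_translation.py <training args>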
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f'Vertex\tShortest Distance from vertex {src}')
    for i, d in enumerate(distance):
        print(f'{i}\t\t{d}')


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ('src', 'dst', 'weight'))
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distance from ``src`` to every vertex, raising if a
    reachable negative cycle exists."""
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ('src', 'dst', 'weight'))
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('Negative cycle found')
    return distance
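

# Worked example (added for illustration): two edges 0 -> 1 (weight 2) and
# 1 -> 2 (weight 3) give shortest distances [0.0, 2.0, 5.0] from vertex 0.
assert bellman_ford(
    [{'src': 0, 'dst': 1, 'weight': 2}, {'src': 1, 'dst': 2, 'weight': 3}], 3, 2, 0
) == [0.0, 2.0, 5.0]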
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 284 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/reformer-crime-and-punishment""": 52_4288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
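

# Hypothetical usage sketch (added; it needs network access to download the
# checkpoint named in PRETRAINED_VOCAB_FILES_MAP, so it is left commented out):
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("Crime and Punishment")["input_ids"]
#     text = tokenizer.decode(ids)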
| 278 |
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode ``word`` using the Baconian substitution table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''')
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian cipher string made of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''')
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
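    # Round-trip check (added): encoding and then decoding restores the input.
    assert decode(encode("hello world")) == "hello world"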
| 0 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Helper that builds tiny Funnel configs and inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
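

# Note (added): with block_sizes=[1, 1, 2] the Funnel encoder pools the sequence
# at each block transition, which is why the base-model checks above expect a
# sequence dimension of 2 (or 3 once truncate_seq is disabled) instead of the
# original seq_length.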
| 230 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
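    # Example invocation (hypothetical script name and paths):
    #   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100-converted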
| 0 | 0 |
"""simple docstring"""
def all_unique_chars(input_str: str) -> bool:
    """Return True if no character occurs more than once in ``input_str``,
    using an integer as a bitmap of the code points seen so far."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
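    # Illustration (added): every character in "abc" is unique; "aba" repeats "a".
    assert all_unique_chars("abc") is True
    assert all_unique_chars("aba") is False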
| 589 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_canine"""] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | 0 |